-rw-r--r--  Documentation/feature-removal-schedule.txt | 7
-rw-r--r--  Documentation/networking/batman-adv.txt | 16
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 3
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c | 2
-rw-r--r--  drivers/net/Kconfig | 3
-rw-r--r--  drivers/net/atl1c/atl1c_hw.c | 15
-rw-r--r--  drivers/net/atl1c/atl1c_hw.h | 43
-rw-r--r--  drivers/net/atl1c/atl1c_main.c | 1
-rw-r--r--  drivers/net/atl1e/atl1e_ethtool.c | 12
-rw-r--r--  drivers/net/atl1e/atl1e_hw.c | 34
-rw-r--r--  drivers/net/atl1e/atl1e_hw.h | 111
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 4
-rw-r--r--  drivers/net/atlx/atl1.c | 77
-rw-r--r--  drivers/net/benet/be.h | 5
-rw-r--r--  drivers/net/benet/be_cmds.c | 29
-rw-r--r--  drivers/net/benet/be_cmds.h | 8
-rw-r--r--  drivers/net/benet/be_ethtool.c | 40
-rw-r--r--  drivers/net/benet/be_main.c | 209
-rw-r--r--  drivers/net/bna/bnad.c | 108
-rw-r--r--  drivers/net/bna/bnad.h | 2
-rw-r--r--  drivers/net/bnx2.c | 8
-rw-r--r--  drivers/net/bnx2.h | 4
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 33
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 70
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 6
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 58
-rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h | 114
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.c | 2527
-rw-r--r--  drivers/net/bnx2x/bnx2x_link.h | 34
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 591
-rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 20
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 4
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/can/Makefile | 1
-rw-r--r--  drivers/net/can/c_can/Kconfig | 15
-rw-r--r--  drivers/net/can/c_can/Makefile | 8
-rw-r--r--  drivers/net/can/c_can/c_can.c | 1158
-rw-r--r--  drivers/net/can/c_can/c_can.h | 86
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 215
-rw-r--r--  drivers/net/cnic.c | 170
-rw-r--r--  drivers/net/cnic.h | 2
-rw-r--r--  drivers/net/cnic_if.h | 8
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c | 5
-rw-r--r--  drivers/net/cxgb4/cxgb4_main.c | 1
-rw-r--r--  drivers/net/e1000e/e1000.h | 5
-rw-r--r--  drivers/net/e1000e/ethtool.c | 65
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 3
-rw-r--r--  drivers/net/e1000e/lib.c | 4
-rw-r--r--  drivers/net/e1000e/netdev.c | 123
-rw-r--r--  drivers/net/e1000e/phy.c | 8
-rw-r--r--  drivers/net/enic/Makefile | 2
-rw-r--r--  drivers/net/enic/enic.h | 11
-rw-r--r--  drivers/net/enic/enic_dev.c | 221
-rw-r--r--  drivers/net/enic/enic_dev.h | 41
-rw-r--r--  drivers/net/enic/enic_main.c | 324
-rw-r--r--  drivers/net/enic/vnic_dev.c | 19
-rw-r--r--  drivers/net/enic/vnic_dev.h | 8
-rw-r--r--  drivers/net/enic/vnic_rq.h | 5
-rw-r--r--  drivers/net/fec.c | 650
-rw-r--r--  drivers/net/hamradio/bpqether.c | 5
-rw-r--r--  drivers/net/igb/e1000_82575.c | 11
-rw-r--r--  drivers/net/igb/e1000_hw.h | 1
-rw-r--r--  drivers/net/igb/e1000_mbx.c | 38
-rw-r--r--  drivers/net/igb/igb_main.c | 10
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 5
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c | 177
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h | 10
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c | 94
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.h | 23
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.c | 115
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.h | 24
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c | 211
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 34
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 22
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_mbx.h | 2
-rw-r--r--  drivers/net/jme.c | 306
-rw-r--r--  drivers/net/jme.h | 87
-rw-r--r--  drivers/net/loopback.c | 9
-rw-r--r--  drivers/net/macvtap.c | 18
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 4
-rw-r--r--  drivers/net/phy/Kconfig | 1
-rw-r--r--  drivers/net/phy/micrel.c | 24
-rw-r--r--  drivers/net/ppp_generic.c | 148
-rw-r--r--  drivers/net/sfc/efx.c | 31
-rw-r--r--  drivers/net/sfc/efx.h | 2
-rw-r--r--  drivers/net/sfc/ethtool.c | 10
-rw-r--r--  drivers/net/sfc/net_driver.h | 66
-rw-r--r--  drivers/net/sfc/nic.c | 51
-rw-r--r--  drivers/net/sfc/regs.h | 6
-rw-r--r--  drivers/net/sfc/selftest.c | 2
-rw-r--r--  drivers/net/sfc/tx.c | 90
-rw-r--r--  drivers/net/smc91x.c | 13
-rw-r--r--  drivers/net/sungem.c | 58
-rw-r--r--  drivers/net/sungem.h | 1
-rw-r--r--  drivers/net/tg3.c | 161
-rw-r--r--  drivers/net/tg3.h | 13
-rw-r--r--  drivers/net/tlan.c | 3775
-rw-r--r--  drivers/net/tlan.h | 192
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/typhoon.c | 3
-rw-r--r--  drivers/net/veth.c | 12
-rw-r--r--  drivers/net/via-velocity.c | 9
-rw-r--r--  drivers/net/via-velocity.h | 8
-rw-r--r--  drivers/net/vxge/vxge-config.c | 32
-rw-r--r--  drivers/net/vxge/vxge-config.h | 10
-rw-r--r--  drivers/net/vxge/vxge-main.c | 216
-rw-r--r--  drivers/net/vxge/vxge-main.h | 23
-rw-r--r--  drivers/net/vxge/vxge-traffic.c | 116
-rw-r--r--  drivers/net/vxge/vxge-traffic.h | 14
-rw-r--r--  drivers/net/vxge/vxge-version.h | 4
-rw-r--r--  drivers/net/wireless/ath/ar9170/main.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/Kconfig | 11
-rw-r--r--  drivers/net/wireless/ath/ath5k/ahb.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath5k/ath5k.h | 18
-rw-r--r--  drivers/net/wireless/ath/ath5k/attach.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 114
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath5k/caps.c | 48
-rw-r--r--  drivers/net/wireless/ath/ath5k/debug.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath5k/debug.h | 10
-rw-r--r--  drivers/net/wireless/ath/ath5k/eeprom.c | 24
-rw-r--r--  drivers/net/wireless/ath/ath5k/eeprom.h | 28
-rw-r--r--  drivers/net/wireless/ath/ath5k/mac80211-ops.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath5k/pci.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c | 142
-rw-r--r--  drivers/net/wireless/ath/ath5k/qcu.c | 46
-rw-r--r--  drivers/net/wireless/ath/ath5k/reg.h | 15
-rw-r--r--  drivers/net/wireless/ath/ath5k/trace.h | 107
-rw-r--r--  drivers/net/wireless/ath/ath9k/Makefile | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ahb.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 26
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_phy.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 110
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 39
-rw-r--r--  drivers/net/wireless/ath/ath9k/calib.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath9k/common.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.c | 440
-rw-r--r--  drivers/net/wireless/ath/ath9k/debug.h | 17
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.c | 32
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_4k.c | 41
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_9287.c | 45
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_def.c | 38
-rw-r--r--  drivers/net/wireless/ath/ath9k/gpio.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_gpio.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 29
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 33
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 64
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 35
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 689
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath9k/rc.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c | 171
-rw-r--r--  drivers/net/wireless/ath/ath9k/reg.h | 17
-rw-r--r--  drivers/net/wireless/ath/ath9k/virtual.c | 717
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 280
-rw-r--r--  drivers/net/wireless/ath/carl9170/carl9170.h | 1
-rw-r--r--  drivers/net/wireless/ath/carl9170/fw.c | 15
-rw-r--r--  drivers/net/wireless/ath/carl9170/fwcmd.h | 1
-rw-r--r--  drivers/net/wireless/ath/carl9170/fwdesc.h | 28
-rw-r--r--  drivers/net/wireless/ath/carl9170/hw.h | 25
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 9
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/version.h | 8
-rw-r--r--  drivers/net/wireless/ath/carl9170/wlan.h | 20
-rw-r--r--  drivers/net/wireless/ath/key.c | 5
-rw-r--r--  drivers/net/wireless/ath/regd.c | 7
-rw-r--r--  drivers/net/wireless/ath/regd.h | 1
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 26
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-2000.c | 560
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-led.c | 27
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 39
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 55
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-calib.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c | 18
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-led.c | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-led.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 79
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.c | 29
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rs.h | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rx.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-rxon.c | 43
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-ucode.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 263
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.h | 30
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-commands.h | 14
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 27
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 28
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 117
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 61
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h | 26
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.c | 201
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.h | 16
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-legacy.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-power.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 27
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/rx.c | 7
-rw-r--r--  drivers/net/wireless/libertas/cfg.c | 6
-rw-r--r--  drivers/net/wireless/libertas/cmd.c | 10
-rw-r--r--  drivers/net/wireless/libertas/dev.h | 2
-rw-r--r--  drivers/net/wireless/libertas/if_spi.c | 368
-rw-r--r--  drivers/net/wireless/libertas/main.c | 77
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 3
-rw-r--r--  drivers/net/wireless/mwl8k.c | 456
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 165
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 159
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800.h | 10
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 107
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.h | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 228
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 9
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 35
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 74
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00lib.h | 24
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00link.c | 7
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 44
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c | 7
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 60
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00reg.h | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 226
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 49
-rw-r--r--  drivers/net/wireless/rtlwifi/core.c | 3
-rw-r--r--  drivers/net/wireless/wl1251/acx.c | 53
-rw-r--r--  drivers/net/wireless/wl1251/acx.h | 72
-rw-r--r--  drivers/net/wireless/wl1251/event.c | 18
-rw-r--r--  drivers/net/wireless/wl1251/main.c | 18
-rw-r--r--  drivers/net/wireless/wl1251/ps.c | 11
-rw-r--r--  drivers/net/wireless/wl1251/rx.c | 49
-rw-r--r--  drivers/net/wireless/wl1251/tx.c | 74
-rw-r--r--  drivers/net/wireless/wl1251/wl1251.h | 7
-rw-r--r--  drivers/net/wireless/wl12xx/acx.c | 160
-rw-r--r--  drivers/net/wireless/wl12xx/acx.h | 91
-rw-r--r--  drivers/net/wireless/wl12xx/boot.c | 35
-rw-r--r--  drivers/net/wireless/wl12xx/cmd.c | 308
-rw-r--r--  drivers/net/wireless/wl12xx/cmd.h | 147
-rw-r--r--  drivers/net/wireless/wl12xx/conf.h | 76
-rw-r--r--  drivers/net/wireless/wl12xx/debugfs.c | 49
-rw-r--r--  drivers/net/wireless/wl12xx/event.c | 7
-rw-r--r--  drivers/net/wireless/wl12xx/event.h | 8
-rw-r--r--  drivers/net/wireless/wl12xx/init.c | 387
-rw-r--r--  drivers/net/wireless/wl12xx/init.h | 2
-rw-r--r--  drivers/net/wireless/wl12xx/main.c | 1116
-rw-r--r--  drivers/net/wireless/wl12xx/rx.c | 16
-rw-r--r--  drivers/net/wireless/wl12xx/rx.h | 11
-rw-r--r--  drivers/net/wireless/wl12xx/sdio.c | 1
-rw-r--r--  drivers/net/wireless/wl12xx/spi.c | 2
-rw-r--r--  drivers/net/wireless/wl12xx/tx.c | 105
-rw-r--r--  drivers/net/wireless/wl12xx/tx.h | 10
-rw-r--r--  drivers/net/wireless/wl12xx/wl12xx.h | 88
-rw-r--r--  drivers/net/wireless/wl12xx/wl12xx_80211.h | 11
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c | 134
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.h | 5
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 448
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h | 24
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 444
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.h | 30
-rw-r--r--  include/linux/audit.h | 2
-rw-r--r--  include/linux/cpu_rmap.h | 73
-rw-r--r--  include/linux/dcbnl.h | 5
-rw-r--r--  include/linux/dccp.h | 2
-rw-r--r--  include/linux/ethtool.h | 86
-rw-r--r--  include/linux/if_link.h | 1
-rw-r--r--  include/linux/inetdevice.h | 1
-rw-r--r--  include/linux/interrupt.h | 33
-rw-r--r--  include/linux/ip_vs.h | 8
-rw-r--r--  include/linux/irqdesc.h | 3
-rw-r--r--  include/linux/micrel_phy.h | 16
-rw-r--r--  include/linux/netdevice.h | 212
-rw-r--r--  include/linux/netfilter.h | 27
-rw-r--r--  include/linux/netfilter/Kbuild | 6
-rw-r--r--  include/linux/netfilter/ipset/Kbuild | 4
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 452
-rw-r--r--  include/linux/netfilter/ipset/ip_set_ahash.h | 1074
-rw-r--r--  include/linux/netfilter/ipset/ip_set_bitmap.h | 31
-rw-r--r--  include/linux/netfilter/ipset/ip_set_getport.h | 21
-rw-r--r--  include/linux/netfilter/ipset/ip_set_hash.h | 26
-rw-r--r--  include/linux/netfilter/ipset/ip_set_list.h | 27
-rw-r--r--  include/linux/netfilter/ipset/ip_set_timeout.h | 127
-rw-r--r--  include/linux/netfilter/ipset/pfxlen.h | 35
-rw-r--r--  include/linux/netfilter/nf_conntrack_snmp.h | 9
-rw-r--r--  include/linux/netfilter/nfnetlink.h | 3
-rw-r--r--  include/linux/netfilter/nfnetlink_conntrack.h | 9
-rw-r--r--  include/linux/netfilter/x_tables.h | 3
-rw-r--r--  include/linux/netfilter/xt_AUDIT.h | 30
-rw-r--r--  include/linux/netfilter/xt_CT.h | 12
-rw-r--r--  include/linux/netfilter/xt_NFQUEUE.h | 6
-rw-r--r--  include/linux/netfilter/xt_TCPOPTSTRIP.h | 4
-rw-r--r--  include/linux/netfilter/xt_TPROXY.h | 10
-rw-r--r--  include/linux/netfilter/xt_cluster.h | 10
-rw-r--r--  include/linux/netfilter/xt_comment.h | 2
-rw-r--r--  include/linux/netfilter/xt_connlimit.h | 16
-rw-r--r--  include/linux/netfilter/xt_conntrack.h | 15
-rw-r--r--  include/linux/netfilter/xt_devgroup.h | 21
-rw-r--r--  include/linux/netfilter/xt_quota.h | 8
-rw-r--r--  include/linux/netfilter/xt_set.h | 56
-rw-r--r--  include/linux/netfilter/xt_socket.h | 2
-rw-r--r--  include/linux/netfilter/xt_time.h | 16
-rw-r--r--  include/linux/netfilter/xt_u32.h | 18
-rw-r--r--  include/linux/netfilter_bridge/ebt_802_3.h | 26
-rw-r--r--  include/linux/netfilter_bridge/ebt_among.h | 4
-rw-r--r--  include/linux/netfilter_bridge/ebt_arp.h | 6
-rw-r--r--  include/linux/netfilter_bridge/ebt_ip.h | 14
-rw-r--r--  include/linux/netfilter_bridge/ebt_ip6.h | 25
-rw-r--r--  include/linux/netfilter_bridge/ebt_limit.h | 10
-rw-r--r--  include/linux/netfilter_bridge/ebt_log.h | 8
-rw-r--r--  include/linux/netfilter_bridge/ebt_mark_m.h | 6
-rw-r--r--  include/linux/netfilter_bridge/ebt_nflog.h | 12
-rw-r--r--  include/linux/netfilter_bridge/ebt_pkttype.h | 6
-rw-r--r--  include/linux/netfilter_bridge/ebt_stp.h | 26
-rw-r--r--  include/linux/netfilter_bridge/ebt_ulog.h | 4
-rw-r--r--  include/linux/netfilter_bridge/ebt_vlan.h | 10
-rw-r--r--  include/linux/netfilter_ipv4/ipt_CLUSTERIP.h | 16
-rw-r--r--  include/linux/netfilter_ipv4/ipt_ECN.h | 8
-rw-r--r--  include/linux/netfilter_ipv4/ipt_SAME.h | 8
-rw-r--r--  include/linux/netfilter_ipv4/ipt_TTL.h | 6
-rw-r--r--  include/linux/netfilter_ipv4/ipt_addrtype.h | 16
-rw-r--r--  include/linux/netfilter_ipv4/ipt_ah.h | 6
-rw-r--r--  include/linux/netfilter_ipv4/ipt_ecn.h | 10
-rw-r--r--  include/linux/netfilter_ipv4/ipt_ttl.h | 6
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_HL.h | 6
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_REJECT.h | 4
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_ah.h | 10
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_frag.h | 10
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_hl.h | 6
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_ipv6header.h | 8
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_mh.h | 6
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_opts.h | 12
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_rt.h | 13
-rw-r--r--  include/linux/pkt_sched.h | 41
-rw-r--r--  include/linux/skbuff.h | 11
-rw-r--r--  include/linux/xfrm.h | 1
-rw-r--r--  include/net/cfg80211.h | 3
-rw-r--r--  include/net/dst.h | 119
-rw-r--r--  include/net/dst_ops.h | 1
-rw-r--r--  include/net/flow.h | 3
-rw-r--r--  include/net/icmp.h | 3
-rw-r--r--  include/net/ieee80211_radiotap.h | 25
-rw-r--r--  include/net/inet_sock.h | 8
-rw-r--r--  include/net/inetpeer.h | 44
-rw-r--r--  include/net/ip6_fib.h | 1
-rw-r--r--  include/net/ip_fib.h | 27
-rw-r--r--  include/net/ip_vs.h | 297
-rw-r--r--  include/net/mac80211.h | 78
-rw-r--r--  include/net/net_namespace.h | 2
-rw-r--r--  include/net/netevent.h | 1
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 23
-rw-r--r--  include/net/netfilter/nf_conntrack_ecache.h | 12
-rw-r--r--  include/net/netfilter/nf_conntrack_extend.h | 10
-rw-r--r--  include/net/netfilter/nf_conntrack_helper.h | 6
-rw-r--r--  include/net/netfilter/nf_conntrack_l3proto.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_timestamp.h | 65
-rw-r--r--  include/net/netfilter/nf_nat.h | 6
-rw-r--r--  include/net/netfilter/nf_nat_core.h | 4
-rw-r--r--  include/net/netlink.h | 9
-rw-r--r--  include/net/netns/conntrack.h | 4
-rw-r--r--  include/net/netns/ip_vs.h | 143
-rw-r--r--  include/net/netns/ipv4.h | 1
-rw-r--r--  include/net/protocol.h | 4
-rw-r--r--  include/net/route.h | 7
-rw-r--r--  include/net/sch_generic.h | 59
-rw-r--r--  include/net/sock.h | 2
-rw-r--r--  include/net/tcp.h | 14
-rw-r--r--  include/net/udp.h | 2
-rw-r--r--  include/net/xfrm.h | 1
-rw-r--r--  kernel/audit.c | 2
-rw-r--r--  kernel/irq/manage.c | 82
-rw-r--r--  lib/Kconfig | 4
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/cpu_rmap.c | 269
-rw-r--r--  net/8021q/vlan.c | 2
-rw-r--r--  net/9p/trans_rdma.c | 1
-rw-r--r--  net/Kconfig | 6
-rw-r--r--  net/batman-adv/Makefile | 2
-rw-r--r--  net/batman-adv/aggregation.c | 2
-rw-r--r--  net/batman-adv/aggregation.h | 2
-rw-r--r--  net/batman-adv/bat_debugfs.c | 6
-rw-r--r--  net/batman-adv/bat_debugfs.h | 2
-rw-r--r--  net/batman-adv/bat_sysfs.c | 2
-rw-r--r--  net/batman-adv/bat_sysfs.h | 2
-rw-r--r--  net/batman-adv/bitarray.c | 2
-rw-r--r--  net/batman-adv/bitarray.h | 2
-rw-r--r--  net/batman-adv/gateway_client.c | 2
-rw-r--r--  net/batman-adv/gateway_client.h | 2
-rw-r--r--  net/batman-adv/gateway_common.c | 2
-rw-r--r--  net/batman-adv/gateway_common.h | 2
-rw-r--r--  net/batman-adv/hard-interface.c | 13
-rw-r--r--  net/batman-adv/hard-interface.h | 6
-rw-r--r--  net/batman-adv/hash.c | 2
-rw-r--r--  net/batman-adv/hash.h | 7
-rw-r--r--  net/batman-adv/icmp_socket.c | 3
-rw-r--r--  net/batman-adv/icmp_socket.h | 4
-rw-r--r--  net/batman-adv/main.c | 3
-rw-r--r--  net/batman-adv/main.h | 17
-rw-r--r--  net/batman-adv/originator.c | 4
-rw-r--r--  net/batman-adv/originator.h | 2
-rw-r--r--  net/batman-adv/packet.h | 3
-rw-r--r--  net/batman-adv/ring_buffer.c | 2
-rw-r--r--  net/batman-adv/ring_buffer.h | 2
-rw-r--r--  net/batman-adv/routing.c | 27
-rw-r--r--  net/batman-adv/routing.h | 7
-rw-r--r--  net/batman-adv/send.c | 7
-rw-r--r--  net/batman-adv/send.h | 4
-rw-r--r--  net/batman-adv/soft-interface.c | 3
-rw-r--r--  net/batman-adv/soft-interface.h | 2
-rw-r--r--  net/batman-adv/translation-table.c | 3
-rw-r--r--  net/batman-adv/translation-table.h | 4
-rw-r--r--  net/batman-adv/types.h | 2
-rw-r--r--  net/batman-adv/unicast.c | 34
-rw-r--r--  net/batman-adv/unicast.h | 25
-rw-r--r--  net/batman-adv/vis.c | 2
-rw-r--r--  net/batman-adv/vis.h | 2
-rw-r--r--  net/bridge/br_device.c | 17
-rw-r--r--  net/bridge/br_if.c | 15
-rw-r--r--  net/bridge/br_private.h | 2
-rw-r--r--  net/bridge/netfilter/ebt_ip6.c | 46
-rw-r--r--  net/bridge/netfilter/ebtables.c | 1
-rw-r--r--  net/caif/cfcnfg.c | 2
-rw-r--r--  net/caif/cfdgml.c | 1
-rw-r--r--  net/caif/cfserl.c | 1
-rw-r--r--  net/caif/cfutill.c | 2
-rw-r--r--  net/caif/cfveil.c | 2
-rw-r--r--  net/core/dev.c | 355
-rw-r--r--  net/core/dst.c | 43
-rw-r--r--  net/core/ethtool.c | 529
-rw-r--r--  net/core/filter.c | 6
-rw-r--r--  net/core/neighbour.c | 13
-rw-r--r--  net/core/net-sysfs.c | 17
-rw-r--r--  net/core/pktgen.c | 234
-rw-r--r--  net/core/rtnetlink.c | 86
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/dccp/ccids/ccid2.c | 9
-rw-r--r--  net/decnet/dn_route.c | 22
-rw-r--r--  net/decnet/dn_table.c | 1
-rw-r--r--  net/ipv4/Kconfig | 42
-rw-r--r--  net/ipv4/Makefile | 4
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/ah4.c | 25
-rw-r--r--  net/ipv4/devinet.c | 78
-rw-r--r--  net/ipv4/fib_frontend.c | 63
-rw-r--r--  net/ipv4/fib_hash.c | 1133
-rw-r--r--  net/ipv4/fib_lookup.h | 2
-rw-r--r--  net/ipv4/fib_rules.c | 12
-rw-r--r--  net/ipv4/fib_semantics.c | 125
-rw-r--r--  net/ipv4/fib_trie.c | 217
-rw-r--r--  net/ipv4/icmp.c | 49
-rw-r--r--  net/ipv4/inetpeer.c | 52
-rw-r--r--  net/ipv4/ip_input.c | 2
-rw-r--r--  net/ipv4/netfilter/Kconfig | 3
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 7
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 3
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 17
-rw-r--r--  net/ipv4/netfilter/nf_nat_amanda.c | 8
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 33
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 9
-rw-r--r--  net/ipv4/route.c | 726
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv4/xfrm4_policy.c | 4
-rw-r--r--  net/ipv6/af_inet6.c | 2
-rw-r--r--  net/ipv6/icmp.c | 16
-rw-r--r--  net/ipv6/ip6_output.c | 5
-rw-r--r--  net/ipv6/ndisc.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c | 3
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 3
-rw-r--r--  net/ipv6/raw.c | 14
-rw-r--r--  net/ipv6/route.c | 68
-rw-r--r--  net/ipv6/sit.c | 23
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/ipv6/xfrm6_policy.c | 2
-rw-r--r--  net/mac80211/agg-rx.c | 7
-rw-r--r--  net/mac80211/agg-tx.c | 23
-rw-r--r--  net/mac80211/cfg.c | 26
-rw-r--r--  net/mac80211/debugfs_netdev.c | 122
-rw-r--r--  net/mac80211/driver-ops.h | 6
-rw-r--r--  net/mac80211/driver-trace.h | 213
-rw-r--r--  net/mac80211/ibss.c | 3
-rw-r--r--  net/mac80211/ieee80211_i.h | 14
-rw-r--r--  net/mac80211/iface.c | 9
-rw-r--r--  net/mac80211/main.c | 54
-rw-r--r--  net/mac80211/mesh.c | 4
-rw-r--r--  net/mac80211/mlme.c | 82
-rw-r--r--  net/mac80211/offchannel.c | 68
-rw-r--r--  net/mac80211/rx.c | 103
-rw-r--r--  net/mac80211/scan.c | 91
-rw-r--r--  net/mac80211/sta_info.c | 3
-rw-r--r--  net/mac80211/sta_info.h | 2
-rw-r--r--  net/mac80211/status.c | 4
-rw-r--r--  net/mac80211/tx.c | 29
-rw-r--r--  net/mac80211/work.c | 66
-rw-r--r--  net/mac80211/wpa.c | 39
-rw-r--r--  net/netfilter/Kconfig | 66
-rw-r--r--  net/netfilter/Makefile | 9
-rw-r--r--  net/netfilter/core.c | 20
-rw-r--r--  net/netfilter/ipset/Kconfig | 121
-rw-r--r--  net/netfilter/ipset/Makefile | 24
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ip.c | 587
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ipmac.c | 652
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_port.c | 515
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 1671
-rw-r--r--  net/netfilter/ipset/ip_set_getport.c | 141
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ip.c | 464
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipport.c | 544
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportip.c | 562
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportnet.c | 628
-rw-r--r--  net/netfilter/ipset/ip_set_hash_net.c | 458
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netport.c | 578
-rw-r--r--  net/netfilter/ipset/ip_set_list_set.c | 584
-rw-r--r--  net/netfilter/ipset/pfxlen.c | 291
-rw-r--r--  net/netfilter/ipvs/ip_vs_app.c | 98
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 195
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 376
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 892
-rw-r--r--  net/netfilter/ipvs/ip_vs_est.c | 134
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 61
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c | 67
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 72
-rw-r--r--  net/netfilter/ipvs/ip_vs_nfct.c | 6
-rw-r--r--  net/netfilter/ipvs/ip_vs_pe.c | 17
-rw-r--r--  net/netfilter/ipvs/ip_vs_pe_sip.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto.c | 129
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_ah_esp.c | 45
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 153
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c | 142
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_udp.c | 110
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 1239
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 26
-rw-r--r--  net/netfilter/nf_conntrack_broadcast.c | 82
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 57
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 34
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 11
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 20
-rw-r--r--  net/netfilter/nf_conntrack_netbios_ns.c | 74
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 49
-rw-r--r--  net/netfilter/nf_conntrack_proto.c | 24
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 14
-rw-r--r--  net/netfilter/nf_conntrack_snmp.c | 77
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 45
-rw-r--r--  net/netfilter/nf_conntrack_timestamp.c | 120
-rw-r--r--  net/netfilter/nf_log.c | 6
-rw-r--r--  net/netfilter/nf_queue.c | 82
-rw-r--r--  net/netfilter/nfnetlink_log.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 22
-rw-r--r--  net/netfilter/x_tables.c | 98
-rw-r--r--  net/netfilter/xt_AUDIT.c | 204
-rw-r--r--  net/netfilter/xt_CLASSIFY.c | 36
-rw-r--r--  net/netfilter/xt_IDLETIMER.c | 2
-rw-r--r--  net/netfilter/xt_LED.c | 2
-rw-r--r--  net/netfilter/xt_NFQUEUE.c | 34
-rw-r--r--  net/netfilter/xt_connlimit.c | 62
-rw-r--r--  net/netfilter/xt_conntrack.c | 75
-rw-r--r--  net/netfilter/xt_cpu.c | 2
-rw-r--r--  net/netfilter/xt_devgroup.c | 82
-rw-r--r--  net/netfilter/xt_iprange.c | 18
-rw-r--r--  net/netfilter/xt_ipvs.c | 2
-rw-r--r--  net/netfilter/xt_set.c | 359
-rw-r--r--  net/packet/af_packet.c | 38
-rw-r--r--  net/rds/rds.h | 1
-rw-r--r--  net/rose/af_rose.c | 7
-rw-r--r--  net/rose/rose_route.c | 28
-rw-r--r--  net/sched/Kconfig | 28
-rw-r--r--  net/sched/Makefile | 3
-rw-r--r--  net/sched/act_api.c | 46
-rw-r--r--  net/sched/act_csum.c | 2
-rw-r--r--  net/sched/act_gact.c | 8
-rw-r--r--  net/sched/act_ipt.c | 16
-rw-r--r--  net/sched/act_mirred.c | 4
-rw-r--r--  net/sched/act_nat.c | 2
-rw-r--r--  net/sched/act_pedit.c | 10
-rw-r--r--  net/sched/act_police.c | 9
-rw-r--r--  net/sched/act_simple.c | 10
-rw-r--r--  net/sched/act_skbedit.c | 8
-rw-r--r--  net/sched/cls_api.c | 33
-rw-r--r--  net/sched/cls_basic.c | 17
-rw-r--r--  net/sched/cls_cgroup.c | 8
-rw-r--r--  net/sched/cls_flow.c | 6
-rw-r--r--  net/sched/cls_fw.c | 38
-rw-r--r--  net/sched/cls_route.c | 126
-rw-r--r--  net/sched/cls_rsvp.h | 95
-rw-r--r--  net/sched/cls_tcindex.c | 2
-rw-r--r--  net/sched/cls_u32.c | 77
-rw-r--r--  net/sched/em_cmp.c | 47
-rw-r--r--  net/sched/em_meta.c | 44
-rw-r--r--  net/sched/em_nbyte.c | 3
-rw-r--r--  net/sched/em_text.c | 3
-rw-r--r--  net/sched/em_u32.c | 2
-rw-r--r--  net/sched/ematch.c | 37
-rw-r--r--  net/sched/sch_api.c | 169
-rw-r--r--  net/sched/sch_atm.c | 16
-rw-r--r--  net/sched/sch_cbq.c | 362
-rw-r--r--  net/sched/sch_choke.c | 677
-rw-r--r--  net/sched/sch_dsmark.c | 21
-rw-r--r--  net/sched/sch_fifo.c | 22
-rw-r--r--  net/sched/sch_generic.c | 40
-rw-r--r--  net/sched/sch_gred.c | 85
-rw-r--r--  net/sched/sch_hfsc.c | 37
-rw-r--r--  net/sched/sch_htb.c | 106
-rw-r--r--  net/sched/sch_mq.c | 1
-rw-r--r--  net/sched/sch_mqprio.c | 416
-rw-r--r--  net/sched/sch_multiq.c | 8
-rw-r--r--  net/sched/sch_netem.c | 8
-rw-r--r--  net/sched/sch_prio.c | 34
-rw-r--r--  net/sched/sch_red.c | 61
-rw-r--r--  net/sched/sch_sfq.c | 67
-rw-r--r--  net/sched/sch_tbf.c | 39
-rw-r--r--  net/sched/sch_teql.c | 36
-rw-r--r--  net/unix/af_unix.c | 66
-rw-r--r--  net/wanrouter/wanmain.c | 2
-rw-r--r--  net/wireless/core.c | 20
-rw-r--r--  net/wireless/nl80211.c | 6
-rw-r--r--  net/wireless/reg.c | 6
-rw-r--r--  net/wireless/util.c | 47
-rw-r--r--  net/wireless/wext-compat.c | 5
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
645 files changed, 36853 insertions, 14573 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index b3f35e5..057602d 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -604,6 +604,13 @@ Who: Jean Delvare <khali@linux-fr.org>
----------------------------
+What: xt_connlimit rev 0
+When: 2012
+Who: Jan Engelhardt <jengelh@medozas.de>
+Files: net/netfilter/xt_connlimit.c
+
+----------------------------
+
What: noswapaccount kernel command line parameter
When: 2.6.40
Why: The original implementation of memsw feature enabled by
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 77f0cdd..18afcd8 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -1,4 +1,4 @@
-[state: 21-11-2010]
+[state: 27-01-2011]
BATMAN-ADV
----------
@@ -67,15 +67,16 @@ All mesh wide settings can be found in batman's own interface
folder:
# ls /sys/class/net/bat0/mesh/
-# aggregated_ogms bonding fragmentation orig_interval
-# vis_mode
+# aggregated_ogms gw_bandwidth hop_penalty
+# bonding gw_mode orig_interval
+# fragmentation gw_sel_class vis_mode
There is a special folder for debugging informations:
# ls /sys/kernel/debug/batman_adv/bat0/
-# originators socket transtable_global transtable_local
-# vis_data
+# gateways socket transtable_global vis_data
+# originators softif_neigh transtable_local
Some of the files contain all sort of status information regard-
@@ -230,9 +231,8 @@ CONTACT
Please send us comments, experiences, questions, anything :)
IRC: #batman on irc.freenode.org
-Mailing-list: b.a.t.m.a.n@b.a.t.m.a.n@lists.open-mesh.org
- (optional subscription at
- https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
+Mailing-list: b.a.t.m.a.n@open-mesh.org (optional subscription
+ at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
You can also contact the Authors:
diff --git a/MAINTAINERS b/MAINTAINERS
index 5dd6c75..1eacf29 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1693,6 +1693,7 @@ S: Supported
F: scripts/checkpatch.pl
CISCO VIC ETHERNET NIC DRIVER
+M: Christian Benvenuti <benve@cisco.com>
M: Vasanthy Kolluri <vkolluri@cisco.com>
M: Roopa Prabhu <roprabhu@cisco.com>
M: David Wang <dwang2@cisco.com>
@@ -5145,6 +5146,7 @@ RALINK RT2X00 WIRELESS LAN DRIVER
P: rt2x00 project
M: Ivo van Doorn <IvDoorn@gmail.com>
M: Gertjan van Wingerde <gwingerde@gmail.com>
+M: Helmut Schaa <helmut.schaa@googlemail.com>
L: linux-wireless@vger.kernel.org
L: users@rt2x00.serialmonkey.com (moderated for non-subscribers)
W: http://rt2x00.serialmonkey.com/
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3b4ec32..3d7f366 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -153,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
nesdev, nesdev->netdev[0]->name);
netdev = nesdev->netdev[0];
nesvnic = netdev_priv(netdev);
- is_bonded = (netdev->master == event_netdev);
+ is_bonded = netif_is_bond_slave(netdev) &&
+ (netdev->master == event_netdev);
if ((netdev == event_netdev) || is_bonded) {
if (nesvnic->rdma_enabled == 0) {
nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 009ec81..ec3aa11 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1118,7 +1118,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
return rc;
}
- if (nesvnic->netdev->master)
+ if (netif_is_bond_slave(netdev))
netdev = nesvnic->netdev->master;
else
netdev = nesvnic->netdev;
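
[A minimal sketch, not part of the patch: the pattern both nes hunks above adopt. netif_is_bond_slave() (added to <linux/netdevice.h> in this merge window) tests the device flags, so ->master is only dereferenced for a device that is actually enslaved to a bonding master. pick_tx_netdev() is a hypothetical helper, not a kernel function.]

#include <linux/netdevice.h>

static struct net_device *pick_tx_netdev(struct net_device *dev)
{
	/* Follow ->master only when the device is a real bond slave;
	 * for a standalone device, ->master may be stale or NULL. */
	if (netif_is_bond_slave(dev))
		return dev->master;
	return dev;
}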
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 0382332..65027a7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,7 +1944,8 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
- MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
+ IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
+ default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
select PHYLIB
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 1bf6720..23f2ab0 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
*/
static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
{
- u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+ u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
~GIGA_CR_1000T_SPEED_MASK;
@@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
}
if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
- atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+ atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
return -1;
return 0;
}
@@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw)
"Error Setting up Auto-Negotiation\n");
return ret_val;
}
- mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
break;
case MEDIA_TYPE_100M_FULL:
- mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
+ mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
break;
case MEDIA_TYPE_100M_HALF:
- mii_bmcr_data |= BMCR_SPEED_100;
+ mii_bmcr_data |= BMCR_SPEED100;
break;
case MEDIA_TYPE_10M_FULL:
- mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
+ mii_bmcr_data |= BMCR_FULLDPLX;
break;
case MEDIA_TYPE_10M_HALF:
- mii_bmcr_data |= BMCR_SPEED_10;
break;
default:
if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
err = atl1c_phy_setup_adv(hw);
if (err)
return err;
- mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+ mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
}
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 3dd67597..655fc6c 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
#define REG_DEBUG_DATA0 0x1900
#define REG_DEBUG_DATA1 0x1904
-/* PHY Control Register */
-#define MII_BMCR 0x00
-#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define BMCR_POWER_DOWN 0x0800 /* Power down */
-#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK 0x2040
-#define BMCR_SPEED_1000 0x0040
-#define BMCR_SPEED_100 0x2000
-#define BMCR_SPEED_10 0x0000
-
-/* PHY Status Register */
-#define MII_BMSR 0x01
-#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-#define MII_PHYSID1 0x02
-#define MII_PHYSID2 0x03
#define L1D_MPW_PHYID1 0xD01C /* V7 */
#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
#define L1D_MPW_PHYID3 0xD01E /* V8 */
/* Autoneg Advertisement Register */
-#define MII_ADVERTISE 0x04
-#define ADVERTISE_SPEED_MASK 0x01E0
-#define ADVERTISE_DEFAULT_CAP 0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+ (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
/* 1000BASE-T Control Register */
-#define MII_GIGA_CR 0x09
#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */
#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */
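
[A minimal sketch, not from this patch, of what the atl1c conversion above buys: the driver-private MII_BMCR/BMCR_* duplicates are deleted and the generic constants from <linux/mii.h> are used instead, so e.g. restarting autonegotiation reads the same as in every other driver. restart_aneg_example() is a hypothetical helper; atl1c_write_phy_reg() is the driver function shown earlier in this diff.]

#include <linux/mii.h>

static int restart_aneg_example(struct atl1c_hw *hw)
{
	/* BMCR_ANENABLE and BMCR_ANRESTART come from linux/mii.h,
	 * replacing the private BMCR_AUTO_NEG_EN/BMCR_RESTART_AUTO_NEG. */
	u16 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;

	return atl1c_write_phy_reg(hw, MII_BMCR, bmcr);
}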
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 3824382..e60595f 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -2718,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
goto err_reset;
}
- device_init_wakeup(&pdev->dev, 1);
/* reset the controller to
* put the device in a known good starting state */
err = atl1c_phy_init(&adapter->hw);
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 6943a6c..1209297 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev,
ecmd->advertising = hw->autoneg_advertised |
ADVERTISED_TP | ADVERTISED_Autoneg;
- adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+ adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
if (hw->autoneg_advertised & ADVERTISE_10_HALF)
- adv4 |= MII_AR_10T_HD_CAPS;
+ adv4 |= ADVERTISE_10HALF;
if (hw->autoneg_advertised & ADVERTISE_10_FULL)
- adv4 |= MII_AR_10T_FD_CAPS;
+ adv4 |= ADVERTISE_10FULL;
if (hw->autoneg_advertised & ADVERTISE_100_HALF)
- adv4 |= MII_AR_100TX_HD_CAPS;
+ adv4 |= ADVERTISE_100HALF;
if (hw->autoneg_advertised & ADVERTISE_100_FULL)
- adv4 |= MII_AR_100TX_FD_CAPS;
+ adv4 |= ADVERTISE_100FULL;
if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
- adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+ adv9 |= ADVERTISE_1000FULL;
if (adv4 != hw->mii_autoneg_adv_reg ||
adv9 != hw->mii_1000t_ctrl_reg) {
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 76cc043..923063d 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T control Register (Address 9).
*/
- mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+ mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
/*
@@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
*/
switch (hw->media_type) {
case MEDIA_TYPE_AUTO_SENSOR:
- mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
- MII_AR_10T_FD_CAPS |
- MII_AR_100TX_HD_CAPS |
- MII_AR_100TX_FD_CAPS);
- hw->autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_10_FULL |
- ADVERTISE_100_HALF |
- ADVERTISE_100_FULL;
+ mii_autoneg_adv_reg |= ADVERTISE_ALL;
+ hw->autoneg_advertised = ADVERTISE_ALL;
if (hw->nic_type == athr_l1e) {
- mii_1000t_ctrl_reg |=
- MII_AT001_CR_1000T_FD_CAPS;
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
hw->autoneg_advertised |= ADVERTISE_1000_FULL;
}
break;
case MEDIA_TYPE_100M_FULL:
- mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL;
hw->autoneg_advertised = ADVERTISE_100_FULL;
break;
case MEDIA_TYPE_100M_HALF:
- mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100_HALF;
hw->autoneg_advertised = ADVERTISE_100_HALF;
break;
case MEDIA_TYPE_10M_FULL:
- mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10_FULL;
hw->autoneg_advertised = ADVERTISE_10_FULL;
break;
default:
- mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10_HALF;
hw->autoneg_advertised = ADVERTISE_10_HALF;
break;
}
/* flow control fixed to enable all */
- mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+ mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
return ret_val;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
- ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
@@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
int ret_val;
u16 phy_data;
- phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+ phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
if (ret_val) {
@@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw)
return err;
if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
- err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+ err = atl1e_write_phy_reg(hw, MII_CTRL1000,
hw->mii_1000t_ctrl_reg);
if (err)
return err;
}
err = atl1e_write_phy_reg(hw, MII_BMCR,
- MII_CR_RESET | MII_CR_AUTO_NEG_EN |
- MII_CR_RESTART_AUTO_NEG);
+ BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
return err;
}
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
index 5ea2f4d..74df16a 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
/***************************** MII definition ***************************************/
/* PHY Common Register */
-#define MII_BMCR 0x00
-#define MII_BMSR 0x01
-#define MII_PHYSID1 0x02
-#define MII_PHYSID2 0x03
-#define MII_ADVERTISE 0x04
-#define MII_LPA 0x05
-#define MII_EXPANSION 0x06
-#define MII_AT001_CR 0x09
-#define MII_AT001_SR 0x0A
-#define MII_AT001_ESR 0x0F
#define MII_AT001_PSCR 0x10
#define MII_AT001_PSSR 0x11
#define MII_INT_CTRL 0x12
#define MII_INT_STATUS 0x13
#define MII_SMARTSPEED 0x14
-#define MII_RERRCOUNTER 0x15
-#define MII_SREVISION 0x16
-#define MII_RESV1 0x17
#define MII_LBRERROR 0x18
-#define MII_PHYADDR 0x19
#define MII_RESV2 0x1a
-#define MII_TPISTATUS 0x1b
-#define MII_NCONFIG 0x1c
#define MII_DBG_ADDR 0x1D
#define MII_DBG_DATA 0x1E
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK 0x2040
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
-#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
-#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
-#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
-#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
-#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
-#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
-#define MII_LPA_PAUSE 0x0400 /* PAUSE */
-#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
-#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
-#define MII_LPA_NPAGE 0x8000 /* Next page bit */
-
/* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
-#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
-#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
-#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
-#define MII_AR_SPEED_MASK 0x01E0
-#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK 0
/* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
-#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */
-/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
-/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
-/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+ (ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK
/* AT001 PHY Specific Control Register */
#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index e28f8ba..bf7500c 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- mii_advertise_data = MII_AR_10T_HD_CAPS;
+ mii_advertise_data = ADVERTISE_10HALF;
- if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+ if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
(atl1e_write_phy_reg(hw,
MII_ADVERTISE, mii_advertise_data) != 0) ||
(atl1e_phy_commit(hw)) != 0) {
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3b52768..67f40b9 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
adapter->wol = 0;
+ device_set_wakeup_enable(&adapter->pdev->dev, false);
adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
adapter->ict = 50000; /* 100ms */
adapter->link_speed = SPEED_0; /* hardware init */
@@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev)
}
#ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u32 ctrl = 0;
u32 wufc = adapter->wol;
u32 val;
- int retval;
u16 speed;
u16 duplex;
@@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
if (netif_running(netdev))
atl1_down(adapter);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
val = ctrl & BMSR_LSTATUS;
if (val)
wufc &= ~ATLX_WUFC_LNKC;
+ if (!wufc)
+ goto disable_wol;
- if (val && wufc) {
+ if (val) {
val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
if (val) {
if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
-
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- goto exit;
- }
-
- if (!val && wufc) {
+ } else {
ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
ioread32(hw->hw_addr + REG_WOL_CTRL);
iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
ioread32(hw->hw_addr + REG_MAC_CTRL);
hw->phy_configured = false;
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- goto exit;
}
-disable_wol:
+ return 0;
+
+ disable_wol:
iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
ioread32(hw->hw_addr + REG_WOL_CTRL);
ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@ disable_wol:
iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
hw->phy_configured = false;
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-exit:
- if (netif_running(netdev))
- pci_disable_msi(adapter->pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int atl1_resume(struct pci_dev *pdev)
+static int atl1_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1_adapter *adapter = netdev_priv(netdev);
- u32 err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- err = pci_enable_device(pdev);
- if (err) {
- if (netif_msg_ifup(adapter))
- dev_printk(KERN_DEBUG, &pdev->dev,
- "error enabling pci device\n");
- return err;
- }
-
- pci_set_master(pdev);
iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_enable_wake(pdev, PCI_D3cold, 0);
atl1_reset_hw(&adapter->hw);
@@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev)
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
+#define ATL1_PM_OPS (&atl1_pm_ops)
+
#else
-#define atl1_suspend NULL
-#define atl1_resume NULL
+
+static int atl1_suspend(struct device *dev) { return 0; }
+
+#define ATL1_PM_OPS NULL
#endif
static void atl1_shutdown(struct pci_dev *pdev)
{
-#ifdef CONFIG_PM
- atl1_suspend(pdev, PMSG_SUSPEND);
-#endif
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ atl1_suspend(&pdev->dev);
+ pci_wake_from_d3(pdev, adapter->wol);
+ pci_set_power_state(pdev, PCI_D3hot);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = {
.id_table = atl1_pci_tbl,
.probe = atl1_probe,
.remove = __devexit_p(atl1_remove),
- .suspend = atl1_suspend,
- .resume = atl1_resume,
- .shutdown = atl1_shutdown
+ .shutdown = atl1_shutdown,
+ .driver.pm = ATL1_PM_OPS,
};
/*
@@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev,
adapter->wol = 0;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= ATLX_WUFC_MAG;
+
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
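
[A minimal sketch, under hypothetical names, of the dev_pm_ops pattern the atl1 conversion above follows: with .driver.pm set, the PCI core handles pci_save_state()/pci_set_power_state() itself, so the callbacks only quiesce the device's own hardware; SIMPLE_DEV_PM_OPS() wires suspend/resume into the suspend, hibernate and poweroff paths.]

#include <linux/pci.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* device-specific quiesce only; no pci_save_state() here */
	dev_dbg(&pdev->dev, "suspending\n");
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* re-initialize the hardware; PCI state is already restored */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name      = "foo",
	.driver.pm = &foo_pm_ops,	/* replaces legacy .suspend/.resume */
};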
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index add0b93..3a800e2 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -450,9 +450,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
mac[5] = (u8)(addr & 0xFF);
mac[4] = (u8)((addr >> 8) & 0xFF);
mac[3] = (u8)((addr >> 16) & 0xFF);
- mac[2] = 0xC9;
- mac[1] = 0x00;
- mac[0] = 0x00;
+ /* Use the OUI from the current MAC address */
+ memcpy(mac, adapter->netdev->dev_addr, 3);
}
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
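
[A minimal sketch of the technique the be.h hunk switches to: a VF MAC address keeps the vendor OUI (first three bytes) of the PF's current MAC and appends a generated 24-bit suffix, instead of hard-coding 00:00:C9. example_vf_mac() is a hypothetical illustration, not the driver function.]

#include <linux/string.h>
#include <linux/types.h>

static void example_vf_mac(const u8 *pf_mac, u32 suffix, u8 *vf_mac)
{
	memcpy(vf_mac, pf_mac, 3);		/* keep the vendor OUI */
	vf_mac[3] = (suffix >> 16) & 0xFF;	/* generated 24-bit NIC id */
	vf_mac[4] = (suffix >> 8) & 0xFF;
	vf_mac[5] = suffix & 0xFF;
}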
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index a179cc6..619ebc2 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -23,6 +23,12 @@ static void be_mcc_notify(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
+ if (adapter->eeh_err) {
+ dev_info(&adapter->pdev->dev,
+ "Error in Card Detected! Cannot issue commands\n");
+ return;
+ }
+
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -102,6 +108,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
{
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
+ adapter->recommended_prio &= ~VLAN_PRIO_MASK;
adapter->recommended_prio =
evt->reco_default_priority << VLAN_PRIO_SHIFT;
}
@@ -216,6 +223,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
int i, num, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ if (adapter->eeh_err)
+ return -EIO;
+
for (i = 0; i < mcc_timeout; i++) {
num = be_process_mcc(adapter, &status);
if (num)
@@ -245,6 +255,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
int msecs = 0;
u32 ready;
+ if (adapter->eeh_err) {
+ dev_err(&adapter->pdev->dev,
+ "Error detected in card.Cannot issue commands\n");
+ return -EIO;
+ }
+
do {
ready = ioread32(db);
if (ready == 0xffffffff) {
@@ -598,7 +614,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id)
+ u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_add *req;
@@ -619,6 +635,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
+ req->hdr.domain = domain;
req->if_id = cpu_to_le32(if_id);
memcpy(req->mac_address, mac_addr, ETH_ALEN);
@@ -634,7 +651,7 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_del *req;
@@ -655,6 +672,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
+ req->hdr.domain = dom;
req->if_id = cpu_to_le32(if_id);
req->pmac_id = cpu_to_le32(pmac_id);
@@ -995,7 +1013,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
}
/* Uses mbox */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_destroy *req;
@@ -1016,6 +1034,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
+ req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
status = be_mbox_notify_wait(adapter);
@@ -1868,8 +1887,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
OPCODE_COMMON_SET_QOS, sizeof(*req));
req->hdr.domain = domain;
- req->valid_bits = BE_QOS_BITS_NIC;
- req->max_bps_nic = bps;
+ req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+ req->max_bps_nic = cpu_to_le32(bps);
status = be_mcc_notify_wait(adapter);
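
[A minimal sketch, with a hypothetical command layout, of the convention the be_cmd_set_qos fix above restores: fields in a DMA-visible command structure are little-endian on the wire, so CPU-order values must pass through cpu_to_le32() before being stored, regardless of host byte order (a no-op on little-endian hosts, a byte swap on big-endian ones).]

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_cmd {		/* hypothetical hardware command */
	__le32 valid_bits;
	__le32 max_bps_nic;
};

static void fill_cmd(struct example_cmd *req, u32 bits, u32 bps)
{
	req->valid_bits  = cpu_to_le32(bits);	/* not: req->valid_bits = bits; */
	req->max_bps_nic = cpu_to_le32(bps);
}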
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 83d15c8..91c5d2b 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -999,12 +999,14 @@ extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
+ u32 if_id, u32 *pmac_id, u32 domain);
+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
+ u32 pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, bool pmac_invalid,
u32 *if_handle, u32 *pmac_id, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+ u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
extern int be_cmd_cq_create(struct be_adapter *adapter,
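
The new domain argument on the PMAC and interface commands threads a virtualization domain into the request header (req->hdr.domain in the hunks above). Judging by the call sites in be_main.c further down, 0 addresses the PF itself and vf + 1 addresses virtual function vf; a tiny sketch of that apparent convention (macro and helper are illustrative):

/* Sketch of the domain numbering the call sites appear to use:
 * 0 = the PF's own resources, vf + 1 = resources of VF number 'vf'. */
#define BE_DOMAIN_PF	0

static inline u32 be_vf_domain(int vf)
{
	return vf + 1;	/* firmware domains for VFs are 1-based */
}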
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index b4be027..07b4ab9 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -376,8 +376,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
- &phy_cmd.dma);
+ phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+ phy_cmd.size, &phy_cmd.dma,
+ GFP_KERNEL);
if (!phy_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
return -ENOMEM;
@@ -416,8 +417,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
adapter->port_type = ecmd->port;
adapter->transceiver = ecmd->transceiver;
adapter->autoneg = ecmd->autoneg;
- pci_free_consistent(adapter->pdev, phy_cmd.size,
- phy_cmd.va, phy_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
+ phy_cmd.dma);
} else {
ecmd->speed = adapter->link_speed;
ecmd->port = adapter->port_type;
@@ -515,12 +516,23 @@ be_phys_id(struct net_device *netdev, u32 data)
return status;
}
+static bool
+be_is_wol_supported(struct be_adapter *adapter)
+{
+ if (!be_physfn(adapter))
+ return false;
+ else
+ return true;
+}
+
static void
be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
- wol->supported = WAKE_MAGIC;
+ if (be_is_wol_supported(adapter))
+ wol->supported = WAKE_MAGIC;
+
if (adapter->wol)
wol->wolopts = WAKE_MAGIC;
else
@@ -536,7 +548,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- if (wol->wolopts & WAKE_MAGIC)
+ if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
adapter->wol = true;
else
adapter->wol = false;
@@ -554,8 +566,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
- ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
- &ddrdma_cmd.dma);
+ ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+ &ddrdma_cmd.dma, GFP_KERNEL);
if (!ddrdma_cmd.va) {
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM;
@@ -569,8 +581,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
}
err:
- pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
- ddrdma_cmd.va, ddrdma_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+ ddrdma_cmd.dma);
return ret;
}
@@ -662,8 +674,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
- eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
- &eeprom_cmd.dma);
+ eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+ &eeprom_cmd.dma, GFP_KERNEL);
if (!eeprom_cmd.va) {
dev_err(&adapter->pdev->dev,
@@ -677,8 +689,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
}
- pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
- eeprom_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
return status;
}
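
Every allocation in this file migrates from the deprecated pci_alloc_consistent()/pci_free_consistent() pair to the generic DMA API, which takes the underlying struct device and an explicit GFP mask; passing GFP_KERNEL on these sleepable paths also lets the allocator reclaim memory instead of failing, as the old implicitly-atomic wrapper could. The translation is mechanical; a minimal sketch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: coherent DMA buffer lifetime with the generic DMA API. */
static void *cmd_buf_alloc(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	/* was: pci_alloc_consistent(pdev, size, dma) -- implicitly atomic */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void cmd_buf_free(struct pci_dev *pdev, size_t size, void *va,
			 dma_addr_t dma)
{
	/* was: pci_free_consistent(pdev, size, va, dma) */
	dma_free_coherent(&pdev->dev, size, va, dma);
}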
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 28a32a6..aad7ea3 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
if (mem->va)
- pci_free_consistent(adapter->pdev, mem->size,
- mem->va, mem->dma);
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -1;
memset(mem->va, 0, mem->size);
@@ -235,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (!be_physfn(adapter))
goto netdev_addr;
- status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
+ status = be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id, 0);
if (status)
return status;
status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle, &adapter->pmac_id);
+ adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
if (!status)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -484,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
-static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
bool unmap_single)
{
dma_addr_t dma;
@@ -494,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
if (wrb->frag_len) {
if (unmap_single)
- pci_unmap_single(pdev, dma, wrb->frag_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(dev, dma, wrb->frag_len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(pdev, dma, wrb->frag_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
}
}
@@ -507,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
{
dma_addr_t busaddr;
int i, copied = 0;
- struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = &adapter->pdev->dev;
struct sk_buff *first_skb = skb;
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_wrb *wrb;
@@ -521,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
if (skb->len > skb->data_len) {
int len = skb_headlen(skb);
- busaddr = pci_map_single(pdev, skb->data, len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, busaddr))
+ busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
goto dma_err;
map_single = true;
wrb = queue_head_node(txq);
@@ -536,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[i];
- busaddr = pci_map_page(pdev, frag->page,
- frag->page_offset,
- frag->size, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, busaddr))
+ busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+ frag->size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, busaddr))
goto dma_err;
wrb = queue_head_node(txq);
wrb_fill(wrb, busaddr, frag->size);
@@ -563,7 +562,7 @@ dma_err:
txq->head = map_head;
while (copied) {
wrb = queue_head_node(txq);
- unmap_tx_frag(pdev, wrb, map_single);
+ unmap_tx_frag(dev, wrb, map_single);
map_single = false;
copied -= wrb->frag_len;
queue_head_inc(txq);
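
The dma_err unwind above rewinds the TX queue head and unmaps every fragment mapped so far, remembering that only the first buffer came from dma_map_single(). Condensed to its essentials, the map-with-rollback pattern looks like this (struct and helper names are illustrative):

#include <linux/dma-mapping.h>

struct frag_sketch {
	struct page *page;
	unsigned int off, len;
	dma_addr_t dma;
};

/* Sketch: map an array of fragments, unmapping in reverse on failure. */
static int map_frags(struct device *dev, struct frag_sketch *f, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		f[i].dma = dma_map_page(dev, f[i].page, f[i].off, f[i].len,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dev, f[i].dma))
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* release everything mapped so far */
		dma_unmap_page(dev, f[i].dma, f[i].len, DMA_TO_DEVICE);
	return -ENOMEM;
}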
@@ -743,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
status = be_cmd_pmac_del(adapter,
adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id);
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
status = be_cmd_pmac_add(adapter, mac,
adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id);
+ &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -822,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
rate = 10000;
adapter->vf_cfg[vf].vf_tx_rate = rate;
- status = be_cmd_set_qos(adapter, rate / 10, vf);
+ status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
if (status)
dev_info(&adapter->pdev->dev,
@@ -888,8 +887,9 @@ get_rx_page_info(struct be_adapter *adapter,
BUG_ON(!rx_page_info->page);
if (rx_page_info->last_page_user) {
- pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
- adapter->big_page_size, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ adapter->big_page_size, DMA_FROM_DEVICE);
rx_page_info->last_page_user = false;
}
@@ -1195,9 +1195,9 @@ static void be_post_rx_frags(struct be_rx_obj *rxo)
rxo->stats.rx_post_fail++;
break;
}
- page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
- adapter->big_page_size,
- PCI_DMA_FROMDEVICE);
+ page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+ 0, adapter->big_page_size,
+ DMA_FROM_DEVICE);
page_info->page_offset = 0;
} else {
get_page(pagep);
@@ -1270,8 +1270,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
do {
cur_index = txq->tail;
wrb = queue_tail_node(txq);
- unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
- skb_headlen(sent_skb)));
+ unmap_tx_frag(&adapter->pdev->dev, wrb,
+ (unmap_skb_hdr && skb_headlen(sent_skb)));
unmap_skb_hdr = false;
num_wrbs++;
@@ -1827,6 +1827,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
if (ue_status_lo || ue_status_hi) {
adapter->ue_detected = true;
+ adapter->eeh_err = true;
dev_err(&adapter->pdev->dev, "UE Detected!!\n");
}
@@ -1865,6 +1866,10 @@ static void be_worker(struct work_struct *work)
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
}
+
+ if (!adapter->ue_detected && !lancer_chip(adapter))
+ be_detect_dump_ue(adapter);
+
goto reschedule;
}
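
Calling be_detect_dump_ue() from be_worker() turns UE detection into a periodic health check: the worker is delayed work that reschedules itself, so on non-Lancer chips the unrecoverable-error registers are now polled once per interval until an error latches. The self-rearming shape of that pattern, reduced to a sketch (names and interval are illustrative):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct health_ctx {
	struct delayed_work work;
	bool ue_detected;
};

static void check_hw_error(struct health_ctx *ctx);	/* assumed elsewhere */

static void health_worker(struct work_struct *work)
{
	struct health_ctx *ctx =
		container_of(work, struct health_ctx, work.work);

	if (!ctx->ue_detected)
		check_hw_error(ctx);	/* may set ctx->ue_detected */

	/* re-arm: runs again in one second */
	schedule_delayed_work(&ctx->work, msecs_to_jiffies(1000));
}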
@@ -2179,7 +2184,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
memset(mac, 0, ETH_ALEN);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (cmd.va == NULL)
return -1;
memset(cmd.va, 0, cmd.size);
@@ -2190,8 +2196,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
if (status) {
dev_err(&adapter->pdev->dev,
"Could not enable Wake-on-lan\n");
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
- cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+ cmd.dma);
return status;
}
status = be_cmd_enable_magic_wol(adapter,
@@ -2204,7 +2210,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
}
- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2225,7 +2231,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
for (vf = 0; vf < num_vfs; vf++) {
status = be_cmd_pmac_add(adapter, mac,
adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id);
+ &adapter->vf_cfg[vf].vf_pmac_id,
+ vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
"Mac address add failed for VF %d\n", vf);
@@ -2245,7 +2252,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
be_cmd_pmac_del(adapter,
adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id);
+ adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
}
}
@@ -2277,22 +2284,26 @@ static int be_setup(struct be_adapter *adapter)
goto do_none;
if (be_physfn(adapter)) {
- while (vf < num_vfs) {
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
- | BE_IF_FLAGS_BROADCAST;
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- mac, true,
+ if (adapter->sriov_enabled) {
+ while (vf < num_vfs) {
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST;
+ status = be_cmd_if_create(adapter, cap_flags,
+ en_flags, mac, true,
&adapter->vf_cfg[vf].vf_if_handle,
NULL, vf+1);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Interface Create failed for VF %d\n", vf);
- goto if_destroy;
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Interface Create failed for VF %d\n",
+ vf);
+ goto if_destroy;
+ }
+ adapter->vf_cfg[vf].vf_pmac_id =
+ BE_INVALID_PMAC_ID;
+ vf++;
}
- adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
- vf++;
}
- } else if (!be_physfn(adapter)) {
+ } else {
status = be_cmd_mac_addr_query(adapter, mac,
MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
if (!status) {
@@ -2313,44 +2324,46 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto rx_qs_destroy;
- if (be_physfn(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto mcc_q_destroy;
- }
-
adapter->link_speed = -1;
return 0;
-mcc_q_destroy:
- if (be_physfn(adapter))
- be_vf_eth_addr_rem(adapter);
be_mcc_queues_destroy(adapter);
rx_qs_destroy:
be_rx_queues_destroy(adapter);
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle);
- be_cmd_if_destroy(adapter, adapter->if_handle);
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
return status;
}
static int be_clear(struct be_adapter *adapter)
{
- if (be_physfn(adapter))
+ int vf;
+
+ if (be_physfn(adapter) && adapter->sriov_enabled)
be_vf_eth_addr_rem(adapter);
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
- be_cmd_if_destroy(adapter, adapter->if_handle);
+ if (be_physfn(adapter) && adapter->sriov_enabled)
+ for (vf = 0; vf < num_vfs; vf++)
+ if (adapter->vf_cfg[vf].vf_if_handle)
+ be_cmd_if_destroy(adapter,
+ adapter->vf_cfg[vf].vf_if_handle,
+ vf + 1);
+
+ be_cmd_if_destroy(adapter, adapter->if_handle, 0);
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
@@ -2453,8 +2466,8 @@ static int be_flash_data(struct be_adapter *adapter,
continue;
if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
(!be_flash_redboot(adapter, fw->data,
- pflashcomp[i].offset, pflashcomp[i].size,
- filehdr_size)))
+ pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
+ (num_of_images * sizeof(struct image_hdr)))))
continue;
p = fw->data;
p += filehdr_size + pflashcomp[i].offset
@@ -2528,8 +2541,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
- flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
- &flash_cmd.dma);
+ flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+ &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
status = -ENOMEM;
dev_err(&adapter->pdev->dev,
@@ -2558,8 +2571,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
status = -1;
}
- pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+ flash_cmd.dma);
if (status) {
dev_err(&adapter->pdev->dev, "Firmware load error\n");
goto fw_exit;
@@ -2700,13 +2713,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
be_unmap_pci_bars(adapter);
if (mem->va)
- pci_free_consistent(adapter->pdev, mem->size,
- mem->va, mem->dma);
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
mem = &adapter->mc_cmd_mem;
if (mem->va)
- pci_free_consistent(adapter->pdev, mem->size,
- mem->va, mem->dma);
+ dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+ mem->dma);
}
static int be_ctrl_init(struct be_adapter *adapter)
@@ -2721,8 +2734,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
goto done;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
- mbox_mem_alloc->size, &mbox_mem_alloc->dma);
+ mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+ mbox_mem_alloc->size,
+ &mbox_mem_alloc->dma,
+ GFP_KERNEL);
if (!mbox_mem_alloc->va) {
status = -ENOMEM;
goto unmap_pci_bars;
@@ -2734,8 +2749,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
- mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
- &mc_cmd_mem->dma);
+ mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
+ mc_cmd_mem->size, &mc_cmd_mem->dma,
+ GFP_KERNEL);
if (mc_cmd_mem->va == NULL) {
status = -ENOMEM;
goto free_mbox;
@@ -2751,8 +2767,8 @@ static int be_ctrl_init(struct be_adapter *adapter)
return 0;
free_mbox:
- pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
- mbox_mem_alloc->va, mbox_mem_alloc->dma);
+ dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+ mbox_mem_alloc->va, mbox_mem_alloc->dma);
unmap_pci_bars:
be_unmap_pci_bars(adapter);
@@ -2766,8 +2782,8 @@ static void be_stats_cleanup(struct be_adapter *adapter)
struct be_dma_mem *cmd = &adapter->stats_cmd;
if (cmd->va)
- pci_free_consistent(adapter->pdev, cmd->size,
- cmd->va, cmd->dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd->size,
+ cmd->va, cmd->dma);
}
static int be_stats_init(struct be_adapter *adapter)
@@ -2775,7 +2791,8 @@ static int be_stats_init(struct be_adapter *adapter)
struct be_dma_mem *cmd = &adapter->stats_cmd;
cmd->size = sizeof(struct be_cmd_req_get_stats);
- cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
+ cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+ GFP_KERNEL);
if (cmd->va == NULL)
return -1;
memset(cmd->va, 0, cmd->size);
@@ -2918,11 +2935,11 @@ static int __devinit be_probe(struct pci_dev *pdev,
adapter->netdev = netdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
- status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!status) {
netdev->features |= NETIF_F_HIGHDMA;
} else {
- status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (status) {
dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
goto free_netdev;
@@ -2947,11 +2964,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto ctrl_clean;
- if (be_physfn(adapter)) {
- status = be_cmd_reset_function(adapter);
- if (status)
- goto ctrl_clean;
- }
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto ctrl_clean;
status = be_stats_init(adapter);
if (status)
@@ -2975,10 +2990,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto unsetup;
netif_carrier_off(netdev);
+ if (be_physfn(adapter) && adapter->sriov_enabled) {
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto unreg_netdev;
+ }
+
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
+unreg_netdev:
+ unregister_netdev(netdev);
unsetup:
be_clear(adapter);
msix_disable:
@@ -3005,6 +3028,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+ cancel_delayed_work_sync(&adapter->work);
if (adapter->wol)
be_setup_wol(adapter, true);
@@ -3017,6 +3041,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
be_clear(adapter);
+ be_msix_disable(adapter);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3038,6 +3063,7 @@ static int be_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, 0);
pci_restore_state(pdev);
+ be_msix_enable(adapter);
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
@@ -3053,6 +3079,8 @@ static int be_resume(struct pci_dev *pdev)
if (adapter->wol)
be_setup_wol(adapter, false);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
}
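
Taken together, the suspend and resume hunks make the two paths mirror each other: the worker is quiesced first and restarted last, and MSI-X is torn down after be_clear() but re-enabled before any firmware command on resume. A condensed schematic of the ordering these hunks establish (intermediate steps elided):

/*
 * be_suspend():                      be_resume():
 *   cancel_delayed_work_sync(work)     pci_set_power_state()/restore_state()
 *   be_setup_wol(true) if WoL          be_msix_enable()
 *   ... be_clear()                     be_cmd_fw_init()
 *   be_msix_disable()                  ... be_setup_wol(false) if WoL
 *   pci_save_state()/disable/D3        schedule_delayed_work(work, 100ms)
 */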
@@ -3064,6 +3092,9 @@ static void be_shutdown(struct pci_dev *pdev)
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
+ if (netif_running(netdev))
+ cancel_delayed_work_sync(&adapter->work);
+
netif_device_detach(netdev);
be_cmd_reset_function(adapter);
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index fad9126..9f356d5 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
}
unmap_array[unmap_cons].skb = NULL;
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
if (++unmap_cons >= unmap_q->q_depth)
break;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- pci_unmap_page(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
if (++unmap_cons >= unmap_q->q_depth)
break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
sent_bytes += skb->len;
wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr), skb_headlen(skb),
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
prefetch(&unmap_array[unmap_cons + 1]);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
prefetch(&unmap_array[unmap_cons + 1]);
- pci_unmap_page(bnad->pcidev,
- pci_unmap_addr(&unmap_array[unmap_cons],
+ dma_unmap_page(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
skb_shinfo(skb)->frags[i].size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
0);
BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
}
@@ -340,19 +340,22 @@ static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
int unmap_cons;
unmap_q = rcb->unmap_q;
+ unmap_array = unmap_q->unmap_array;
for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
- skb = unmap_q->unmap_array[unmap_cons].skb;
+ skb = unmap_array[unmap_cons].skb;
if (!skb)
continue;
- unmap_q->unmap_array[unmap_cons].skb = NULL;
- pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
- unmap_array[unmap_cons],
- dma_addr), rcb->rxq->buffer_size,
- PCI_DMA_FROMDEVICE);
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
+ dma_addr),
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
skb->dev = bnad->netdev;
skb_reserve(skb, NET_IP_ALIGN);
unmap_array[unmap_prod].skb = skb;
- dma_addr = pci_map_single(bnad->pcidev, skb->data,
- rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
- pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
+ dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
struct bna_rcb *rcb = NULL;
unsigned int wi_range, packets = 0, wis = 0;
struct bnad_unmap_q *unmap_q;
+ struct bnad_skb_unmap *unmap_array;
struct sk_buff *skb;
- u32 flags;
+ u32 flags, unmap_cons;
u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
rcb = ccb->rcb[1];
unmap_q = rcb->unmap_q;
+ unmap_array = unmap_q->unmap_array;
+ unmap_cons = unmap_q->consumer_index;
- skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+ skb = unmap_array[unmap_cons].skb;
BUG_ON(!(skb));
- unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
- pci_unmap_single(bnad->pcidev,
- pci_unmap_addr(&unmap_q->
- unmap_array[unmap_q->
- consumer_index],
+ unmap_array[unmap_cons].skb = NULL;
+ dma_unmap_single(&bnad->pcidev->dev,
+ dma_unmap_addr(&unmap_array[unmap_cons],
dma_addr),
- rcb->rxq->buffer_size,
- PCI_DMA_FROMDEVICE);
+ rcb->rxq->buffer_size,
+ DMA_FROM_DEVICE);
BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
/* Should be more efficient? Performance? */
@@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad,
if (mem_info->mem_type == BNA_MEM_T_DMA) {
BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
dma_pa);
- pci_free_consistent(bnad->pcidev,
- mem_info->mdl[i].len,
- mem_info->mdl[i].kva, dma_pa);
+ dma_free_coherent(&bnad->pcidev->dev,
+ mem_info->mdl[i].len,
+ mem_info->mdl[i].kva, dma_pa);
} else
kfree(mem_info->mdl[i].kva);
}
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
for (i = 0; i < mem_info->num; i++) {
mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva =
- pci_alloc_consistent(bnad->pcidev,
- mem_info->len, &dma_pa);
+ dma_alloc_coherent(&bnad->pcidev->dev,
+ mem_info->len, &dma_pa,
+ GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL)
goto err_return;
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unmap_q->unmap_array[unmap_prod].skb = skb;
BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(skb_headlen(skb));
- dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
txqent->vector[vect_id].length = htons(size);
- dma_addr =
- pci_map_page(bnad->pcidev, frag->page,
- frag->page_offset, size,
- PCI_DMA_TODEVICE);
- pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+ dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+ frag->page_offset, size, DMA_TO_DEVICE);
+ dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
dma_addr);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
*using_dac = 1;
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32));
if (err)
goto release_regions;
}
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 8b1d515..a89117f 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -181,7 +181,7 @@ struct bnad_rx_info {
/* Unmap queues for Tx / Rx cleanup */
struct bnad_skb_unmap {
struct sk_buff *skb;
- DECLARE_PCI_UNMAP_ADDR(dma_addr)
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
};
struct bnad_unmap_q {
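
DEFINE_DMA_UNMAP_ADDR is the bus-agnostic successor to DECLARE_PCI_UNMAP_ADDR: it reserves storage for the DMA address inside the unmap structure only on configurations where unmapping actually needs it, and compiles to nothing elsewhere; the field is then accessed exclusively through dma_unmap_addr()/dma_unmap_addr_set(), as the bnad.c hunks above do. A minimal sketch of the trio:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct unmap_slot_sketch {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);	/* may occupy zero bytes */
};

static void slot_map(struct device *dev, struct unmap_slot_sketch *s,
		     void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* a no-op on platforms where the storage was compiled away */
	dma_unmap_addr_set(s, dma_addr, dma);
}

static void slot_unmap(struct device *dev, struct unmap_slot_sketch *s,
		       size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(s, dma_addr), len, DMA_TO_DEVICE);
}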
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 0ba59d5..2a961b7 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
info.cmd = CNIC_CTL_STOP_CMD;
c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp)
struct cnic_ctl_info info;
mutex_lock(&bp->cnic_lock);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_lock));
if (c_ops) {
if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -8315,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#endif
};
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static void inline vlan_features_add(struct net_device *dev, u32 flags)
{
dev->vlan_features |= flags;
}
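
The cnic_ops changes pair with the __rcu annotation added in bnx2.h below: sparse can then flag plain dereferences, and since these readers hold cnic_lock rather than entering an RCU read-side critical section, rcu_dereference_protected() documents (and lockdep verifies) exactly which lock makes the access safe. A minimal sketch of the idiom:

#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct ops_sketch { void (*ctl)(void); };	/* illustrative */

static struct ops_sketch __rcu *live_ops;	/* set via rcu_assign_pointer() */
static DEFINE_MUTEX(ops_lock);

static void call_ops_locked(void)
{
	struct ops_sketch *o;

	mutex_lock(&ops_lock);
	/* holder of ops_lock, not an RCU reader: no rcu_read_lock() needed */
	o = rcu_dereference_protected(live_ops, lockdep_is_held(&ops_lock));
	if (o)
		o->ctl();
	mutex_unlock(&ops_lock);
}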
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index f459fb2..7a5e88f 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6207,6 +6207,8 @@ struct l2_fhdr {
#define BNX2_CP_SCRATCH 0x001a0000
+#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080
+
/*
* mcp_reg definition
@@ -6759,7 +6761,7 @@ struct bnx2 {
u32 tx_wake_thresh;
#ifdef BCM_CNIC
- struct cnic_ops *cnic_ops;
+ struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
#endif
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 653c624..c0dd30d 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.62.00-5"
-#define DRV_MODULE_RELDATE "2011/01/30"
+#define DRV_MODULE_VERSION "1.62.11-0"
+#define DRV_MODULE_RELDATE "2011/01/31"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp);
#endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
+#define bnx2x_uc_addr(ha) ((ha)->addr)
#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x) (u32)(((u64)(x)) >> 32)
@@ -341,6 +342,8 @@ struct bnx2x_fastpath {
/* chip independent shortcut into rx_prods_offset memory */
u32 ustorm_rx_prods_offset;
+ u32 rx_buf_size;
+
dma_addr_t status_blk_mapping;
struct sw_tx_bd *tx_buf_ring;
@@ -428,6 +431,10 @@ struct bnx2x_fastpath {
};
#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
+
#ifdef BCM_CNIC
/* FCoE L2 `fastpath' is right after the eth entries */
#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
@@ -810,6 +817,7 @@ struct bnx2x_slowpath {
struct eth_stats_query fw_stats;
struct mac_configuration_cmd mac_config;
struct mac_configuration_cmd mcast_config;
+ struct mac_configuration_cmd uc_mac_config;
struct client_init_ramrod_data client_init_data;
/* used by dmae command executer */
@@ -911,7 +919,6 @@ struct bnx2x {
int tx_ring_size;
u32 rx_csum;
- u32 rx_buf_size;
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
@@ -939,7 +946,7 @@ struct bnx2x {
struct eth_spe *spq_prod_bd;
struct eth_spe *spq_last_bd;
__le16 *dsb_sp_prod;
- atomic_t spq_left; /* serialize spq */
+ atomic_t cq_spq_left; /* ETH_XXX ramrods credit */
/* used to synchronize spq accesses */
spinlock_t spq_lock;
@@ -949,6 +956,7 @@ struct bnx2x {
u16 eq_prod;
u16 eq_cons;
__le16 *eq_cons_sb;
+ atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
/* Flags for marking that there is a STAT_QUERY or
SET_MAC ramrod pending */
@@ -976,8 +984,12 @@ struct bnx2x {
#define MF_FUNC_DIS 0x1000
#define FCOE_MACS_SET 0x2000
#define NO_FCOE_FLAG 0x4000
+#define NO_ISCSI_OOO_FLAG 0x8000
+#define NO_ISCSI_FLAG 0x10000
#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
+#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
int pf_num; /* absolute PF number */
int pfid; /* per-path PF number */
@@ -1064,6 +1076,7 @@ struct bnx2x {
int num_queues;
int disable_tpa;
int int_mode;
+ u32 *rx_indir_table;
struct tstorm_eth_mac_filter_config mac_filters;
#define BNX2X_ACCEPT_NONE 0x0000
@@ -1110,7 +1123,7 @@ struct bnx2x {
#define BNX2X_CNIC_FLAG_MAC_SET 1
void *t2;
dma_addr_t t2_mapping;
- struct cnic_ops *cnic_ops;
+ struct cnic_ops __rcu *cnic_ops;
void *cnic_data;
u32 cnic_tag;
struct cnic_eth_dev cnic_eth_dev;
@@ -1125,13 +1138,12 @@ struct bnx2x {
u16 cnic_kwq_pending;
u16 cnic_spq_pending;
struct mutex cnic_mutex;
- u8 iscsi_mac[ETH_ALEN];
u8 fip_mac[ETH_ALEN];
#endif
int dmae_ready;
/* used to synchronize dmae accesses */
- struct mutex dmae_mutex;
+ spinlock_t dmae_lock;
/* used to protect the FW mail box */
struct mutex fw_mb_mutex;
@@ -1447,6 +1459,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
u32 data_hi, u32 data_lo, int common);
+
+/* Clears multicast and unicast list configuration in the chip. */
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_uc_list(struct bnx2x *bp);
+
void bnx2x_update_coalesce(struct bnx2x *bp);
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
@@ -1782,5 +1800,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_push_indir_table(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d..6fac8e1 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
/* move empty skb from pool to prod and map it */
prod_rx_buf->skb = fp->tpa_pool[queue].skb;
mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
/* move partial skb from cons to pool (don't unmap yet) */
@@ -333,13 +333,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
struct sk_buff *skb = rx_buf->skb;
/* alloc new skb */
- struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
@@ -349,10 +349,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
prefetch(((char *)(skb)) + L1_CACHE_BYTES);
#ifdef BNX2X_STOP_ON_ERROR
- if (pad + len > bp->rx_buf_size) {
+ if (pad + len > fp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
"pad %d len %d rx_buf_size %d\n",
- pad, len, bp->rx_buf_size);
+ pad, len, fp->rx_buf_size);
bnx2x_panic();
return;
}
@@ -582,7 +582,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size,
+ fp->rx_buf_size,
DMA_FROM_DEVICE);
skb_reserve(skb, pad);
skb_put(skb, len);
@@ -821,19 +821,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
u16 ring_prod;
int i, j;
- bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
- IP_HEADER_ALIGNMENT_PADDING;
-
- DP(NETIF_MSG_IFUP,
- "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
+ DP(NETIF_MSG_IFUP,
+ "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
if (!fp->disable_tpa) {
for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb =
- netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ netdev_alloc_skb(bp->dev, fp->rx_buf_size);
if (!fp->tpa_pool[i].skb) {
BNX2X_ERR("Failed to allocate TPA "
"skb pool for queue[%d] - "
@@ -941,7 +938,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
rx_buf->skb = NULL;
dev_kfree_skb(skb);
@@ -1249,6 +1246,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
return rc;
}
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ /* Always use a mini-jumbo MTU for the FCoE L2 ring */
+ if (IS_FCOE_IDX(i))
+ /*
+ * Although no IP frames are expected to arrive on
+ * this ring, we still add an
+ * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+ * overrun attack.
+ */
+ fp->rx_buf_size =
+ BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+ BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ else
+ fp->rx_buf_size =
+ bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+ IP_HEADER_ALIGNMENT_PADDING;
+ }
+}
+
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
@@ -1272,6 +1294,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* must be called before memory allocation and HW init */
bnx2x_ilt_set_info(bp);
+ /* Set the receive queues buffer size */
+ bnx2x_set_rx_buf_size(bp);
+
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
@@ -1427,28 +1452,35 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_set_eth_mac(bp, 1);
+ /* Clear MC configuration */
+ if (CHIP_IS_E1(bp))
+ bnx2x_invalidate_e1_mc_list(bp);
+ else
+ bnx2x_invalidate_e1h_mc_list(bp);
+
+ /* Clear UC lists configuration */
+ bnx2x_invalidate_uc_list(bp);
+
if (bp->port.pmf)
bnx2x_initial_phy_init(bp, load_mode);
+ /* Initialize Rx filtering */
+ bnx2x_set_rx_mode(bp->dev);
+
/* Start fast path */
switch (load_mode) {
case LOAD_NORMAL:
/* Tx queue should be only reenabled */
netif_tx_wake_all_queues(bp->dev);
/* Initialize the receive filter. */
- bnx2x_set_rx_mode(bp->dev);
break;
case LOAD_OPEN:
netif_tx_start_all_queues(bp->dev);
smp_mb__after_clear_bit();
- /* Initialize the receive filter. */
- bnx2x_set_rx_mode(bp->dev);
break;
case LOAD_DIAG:
- /* Initialize the receive filter. */
- bnx2x_set_rx_mode(bp->dev);
bp->state = BNX2X_STATE_DIAG;
break;
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d6..f062d5d 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -822,11 +822,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
if (unlikely(skb == NULL))
return -ENOMEM;
- mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+ mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
dev_kfree_skb(skb);
@@ -892,7 +892,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
if (fp->tpa_state[i] == BNX2X_TPA_START)
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, DMA_FROM_DEVICE);
+ fp->rx_buf_size, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_buf->skb = NULL;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b..8d19d12 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1618,7 +1618,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
/* prepare the loopback packet */
pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
- skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+ skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
if (!skb) {
rc = -ENOMEM;
goto test_loopback_exit;
@@ -2134,6 +2134,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
return 0;
}
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ void *rules __always_unused)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = BNX2X_NUM_ETH_QUEUES(bp);
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+ struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t copy_size =
+ min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
+ memcpy(indir->ring_index, bp->rx_indir_table,
+ copy_size * sizeof(bp->rx_indir_table[0]));
+ return 0;
+}
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+ const struct ethtool_rxfh_indir *indir)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ size_t i;
+
+ if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+ return -EOPNOTSUPP;
+
+ /* Validate size and indices */
+ if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+ return -EINVAL;
+ for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+ if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+ return -EINVAL;
+
+ memcpy(bp->rx_indir_table, indir->ring_index,
+ indir->size * sizeof(bp->rx_indir_table[0]));
+ bnx2x_push_indir_table(bp);
+ return 0;
+}
+
static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_settings = bnx2x_get_settings,
.set_settings = bnx2x_set_settings,
@@ -2170,6 +2223,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_strings = bnx2x_get_strings,
.phys_id = bnx2x_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
};
void bnx2x_set_ethtool_ops(struct net_device *netdev)
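
Wiring up get_rxnfc, get_rxfh_indir and set_rxfh_indir exposes the RSS indirection table to userspace: the ethtool utility should be able to read it (ethtool -x <dev>, assuming an ethtool build with indirection-table support) and rewrite it, with set_rxfh_indir rejecting any entry that does not index a real ethernet queue before pushing the table to the chip via bnx2x_push_indir_table().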
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 548f563..be503cc 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,20 +11,27 @@
#include "bnx2x_fw_defs.h"
+#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
+
struct license_key {
u32 reserved[6];
-#if defined(__BIG_ENDIAN)
- u16 max_iscsi_init_conn;
- u16 max_iscsi_trgt_conn;
-#elif defined(__LITTLE_ENDIAN)
- u16 max_iscsi_trgt_conn;
- u16 max_iscsi_init_conn;
-#endif
+ u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16
- u32 reserved_a[6];
-};
+ u32 reserved_a;
+
+ u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16
+ u32 reserved_b[4];
+};
#define PORT_0 0
#define PORT_1 1
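
The reworked license_key packs each pair of 16-bit connection limits into one 32-bit word, described by the MASK/SHIFT macro pairs above; consumers recover a field with the usual (word & MASK) >> SHIFT recipe. A short sketch using the iSCSI word (assuming the value has already been byte-swapped to host order):

#include <linux/types.h>

static inline u16 iscsi_trgt_conn(u32 max_iscsi_conn)
{
	return (max_iscsi_conn & BNX2X_MAX_ISCSI_TRGT_CONN_MASK) >>
	       BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT;
}

static inline u16 iscsi_init_conn(u32 max_iscsi_conn)
{
	return (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
	       BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
}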
@@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16
- u32 Reserved0[16]; /* 0x158 */
-
+ u32 Reserved0[3]; /* 0x158 */
+ /* Controls the TX laser of the SFP+ module */
+ u32 sfp_ctrl; /* 0x164 */
+#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF
+#define PORT_HW_CFG_TX_LASER_SHIFT 0
+#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000
+#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001
+#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002
+#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003
+#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004
+
+ /* Controls the fault module LED of the SFP+ */
+#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00
+#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300
+#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400
+ u32 Reserved01[12]; /* 0x168 */
/* for external PHY, or forced mode or during AN */
u16 xgxs_config_rx[4]; /* 0x198 */
@@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u32 Reserved1[56]; /* 0x1A8 */
u32 default_cfg; /* 0x288 */
+#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003
+#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0
+#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001
+#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002
+#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003
+
+#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C
+#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2
+#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004
+#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008
+#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c
+
+#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030
+#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4
+#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010
+#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020
+#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030
+
+#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0
+#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6
+#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000
+#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040
+#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080
+#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0
+
+ /*
+ * When the KR link must be forced (e.g. the link partner is not
+ * KR-compliant), this parameter determines the trigger for it. When
+ * GPIO is selected, a low input forces the speed. The default forced
+ * speed is currently 1G; in the future this may be widened to select
+ * the forced speed with another parameter. Note that when force-1G is
+ * enabled, it overrides option 56, the Link Speed option.
+ */
+#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00
+#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8
+#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800
+#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900
+ /* Determines which GPIO (if any) is used to reset the external PHY */
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000
/* Enable BAM on KR */
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+ /* Enable Common Mode Sense */
+#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000
+#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21
+#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
@@ -381,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index dd1210f..f2f367d 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@
/********************************************************/
#define ETH_HLEN 14
-#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
#define MDIO_ACCESS_TIMEOUT 1000
-#define BMAC_CONTROL_RX_ENABLE 2
+#define BMAC_CONTROL_RX_ENABLE 2
/***********************************************************/
/* Shortcut definitions */
@@ -79,7 +80,7 @@
#define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37
#define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM
#define AUTONEG_PARALLEL \
SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
#define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
#define GP_STATUS_10G_KX4 \
MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
-#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
-#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
-#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4
#define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
#define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD
#define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
#define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD
#define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
#define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
-#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
-#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
#define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
#define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
#define PHY_XGXS_FLAG 0x1
#define PHY_SGMII_FLAG 0x2
@@ -142,7 +143,7 @@
/* */
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
- #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
+ #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
@@ -153,15 +154,15 @@
#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
- #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
+ #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8
-#define SFP_EEPROM_OPTIONS_ADDR 0x40
+#define SFP_EEPROM_OPTIONS_ADDR 0x40
#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
-#define SFP_EEPROM_OPTIONS_SIZE 2
+#define SFP_EEPROM_OPTIONS_SIZE 2
-#define EDC_MODE_LINEAR 0x0022
-#define EDC_MODE_LIMITING 0x0044
-#define EDC_MODE_PASSIVE_DAC 0x0055
+#define EDC_MODE_LINEAR 0x0022
+#define EDC_MODE_LIMITING 0x0044
+#define EDC_MODE_PASSIVE_DAC 0x0055
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -170,24 +171,18 @@
/* INTERFACE */
/**********************************************************/
-#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_write(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
bnx2x_cl45_read(_bp, _phy, \
(_phy)->def_md_devad, \
(_bank + (_addr & 0xf)), \
_val)
-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val);
-
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val);
-
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params)
DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
- /**
+ /*
* mapping between entry priority and client number (0,1,2 - debug and
* management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
* 3bits client num.
@@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params)
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
- /**
+ /*
* Bitmap of 5bits length. Each bit specifies whether the entry behaves
* as strict. Bits 0,1,2 - debug and management entries, 3 -
* COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params)
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
/* defines which entries (clients) are subjected to WFQ arbitration */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
- /**
- * For strict priority entries defines the number of consecutive
- * slots for the highest priority.
- */
+ /*
+ * For strict priority entries defines the number of consecutive
+ * slots for the highest priority.
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
- /**
+ /*
* mapping between the CREDIT_WEIGHT registers and actual client
* numbers
*/
@@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params)
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
/* ETS mode disable */
REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
- /**
+ /*
* If ETS mode is enabled (there is no strict priority) defines a WFQ
* weight for COS0/COS1.
*/
@@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params)
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
}
-void bnx2x_ets_bw_limit_common(const struct link_params *params)
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
{
/* ETS disabled configuration */
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
- /**
- * defines which entries (clients) are subjected to WFQ arbitration
- * COS0 0x8
- * COS1 0x10
- */
+ /*
+ * defines which entries (clients) are subjected to WFQ arbitration
+ * COS0 0x8
+ * COS1 0x10
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
- /**
- * mapping between the ARB_CREDIT_WEIGHT registers and actual
- * client numbers (WEIGHT_0 does not actually have to represent
- * client 0)
- * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
- * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
- */
+ /*
+ * mapping between the ARB_CREDIT_WEIGHT registers and actual
+ * client numbers (WEIGHT_0 does not actually have to represent
+ * client 0)
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params)
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
- /**
- * Bitmap of 5bits length. Each bit specifies whether the entry behaves
- * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
- * entry, 4 - COS1 entry.
- * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
- * bit4 bit3 bit2 bit1 bit0
- * MCP and debug are strict
- */
+ /*
+ * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+ * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+ * entry, 4 - COS1 entry.
+ * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
+ * bit4 bit3 bit2 bit1 bit0
+ * MCP and debug are strict
+ */
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
if ((0 == total_bw) ||
(0 == cos0_bw) ||
(0 == cos1_bw)) {
- DP(NETIF_MSG_LINK,
- "bnx2x_ets_bw_limit: Total BW can't be zero\n");
+ DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
return;
}
@@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
u32 val = 0;
DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
- /**
+ /*
* Bitmap of 5bits length. Each bit specifies whether the entry behaves
* as strict. Bits 0,1,2 - debug and management entries,
* 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
* MCP and debug are strict
*/
REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
- /**
+ /*
* For strict priority entries defines the number of consecutive slots
* for the highest priority.
*/
@@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
/* Defines the number of consecutive slots for the strict priority */
REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
- /**
- * mapping between entry priority to client number (0,1,2 -debug and
- * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
- * 3bits client num.
- * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
- * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
- * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
- */
+ /*
+	 * mapping between entry priority and client number (0,1,2 - debug and
+	 * management clients, 3 - COS0 client, 4 - COS1 client) (HIGHEST)
+ * 3bits client num.
+ * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
+ * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
+ * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
+ */
val = (0 == strict_cos) ? 0x2318 : 0x22E0;
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
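The two constants differ only in where cos0 and cos1 land; a quick decode using the same 3-bit packing as above:

#include <stdint.h>
#include <stdio.h>

/* 0x2318 puts cos0 (client 3) at PRI1 and cos1 (client 4) at PRI2;
 * 0x22E0 swaps them, matching the two mapping rows in the comment.
 */
int main(void)
{
	const uint32_t maps[2] = { 0x2318, 0x22E0 };
	int m, pri;

	for (m = 0; m < 2; m++) {
		printf("0x%04x:", maps[m]);
		for (pri = 0; pri < 5; pri++)
			printf(" PRI%d=%u", pri,
			       (unsigned)((maps[m] >> (3 * pri)) & 0x7));
		printf("\n");
	}
	return 0;
}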
@@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
/* MAC/PBF section */
/******************************************************************/
static void bnx2x_emac_init(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
/* reset and unreset the emac core */
struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params,
u16 timeout;
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
udelay(5);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
/* init emac - use read-modify-write */
/* self clear reset */
@@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params,
}
static u8 bnx2x_emac_enable(struct link_params *params,
- struct link_vars *vars, u8 lb)
+ struct link_vars *vars, u8 lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params,
/* enable emac and not bmac */
REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
- /* for paladium */
- if (CHIP_REV_IS_EMUL(bp)) {
- /* Use lane 1 (of lanes 0-3) */
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 1);
- }
- /* for fpga */
- else
-
- if (CHIP_REV_IS_FPGA(bp)) {
- /* Use lane 1 (of lanes 0-3) */
- DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
-
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
- 0);
- } else
/* ASIC */
if (vars->phy_flags & PHY_XGXS_FLAG) {
u32 ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
DP(NETIF_MSG_LINK, "XGXS\n");
/* select the master lanes (out of 0-3) */
- REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
- port*4, ser_lane);
+ REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
/* select XGXS */
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 1);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
} else { /* SerDes */
DP(NETIF_MSG_LINK, "SerDes\n");
/* select SerDes */
- REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
- port*4, 0);
+ REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
}
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_RESET);
+ EMAC_RX_MODE_RESET);
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_RESET);
+ EMAC_TX_MODE_RESET);
if (CHIP_REV_IS_SLOW(bp)) {
/* config GMII mode */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(bp, EMAC_REG_EMAC_MODE,
- (val | EMAC_MODE_PORT_GMII));
+ EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
} else { /* ASIC */
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params,
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
- /**
- * Setting this bit causes MAC control frames (except for pause
- * frames) to be passed on for processing. This setting has no
- * affect on the operation of the pause frames. This bit effects
- * all packets regardless of RX Parser packet sorting logic.
- * Turn the PFC off to make sure we are in Xon state before
- * enabling it.
- */
+ /*
+ * Setting this bit causes MAC control frames (except for pause
+ * frames) to be passed on for processing. This setting has no
+	 * effect on the operation of the pause frames. This bit affects
+ * all packets regardless of RX Parser packet sorting logic.
+ * Turn the PFC off to make sure we are in Xon state before
+ * enabling it.
+ */
EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
- if (CHIP_REV_IS_EMUL(bp)) {
- /* take the BigMac out of reset */
- REG_WR(bp,
- GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
- /* enable access for bmac registers */
- REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
- } else
- REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
vars->mac_type = MAC_TYPE_EMAC;
return 0;
@@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
val |= (1<<5);
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
udelay(30);
/* Tx control */
@@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
- /**
- * Set Time (based unit is 512 bit time) between automatic
- * re-sending of PP packets amd enable automatic re-send of
- * Per-Priroity Packet as long as pp_gen is asserted and
- * pp_disable is low.
- */
+ /*
+	 * Set Time (base unit is 512 bit times) between automatic
+	 * re-sending of PP packets and enable automatic re-send of
+	 * Per-Priority Packets as long as pp_gen is asserted and
+ * pp_disable is low.
+ */
val = 0x8000;
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] = val;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
- wb_data, 2);
+ wb_data, 2);
/* mac control */
val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
full_xon_th =
PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
}
- /* The number of free blocks below which the pause signal to class 0
- of MAC #n is asserted. n=0,1 */
+ /*
+ * The number of free blocks below which the pause signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
- /* The number of free blocks above which the pause signal to class 0
- of MAC #n is de-asserted. n=0,1 */
+ /*
+ * The number of free blocks above which the pause signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
- /* The number of free blocks below which the full signal to class 0
- of MAC #n is asserted. n=0,1 */
+ /*
+ * The number of free blocks below which the full signal to class 0
+ * of MAC #n is asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
- /* The number of free blocks above which the full signal to class 0
- of MAC #n is de-asserted. n=0,1 */
+ /*
+ * The number of free blocks above which the full signal to class 0
+ * of MAC #n is de-asserted. n=0,1
+ */
REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params,
full_xon_th =
PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
}
- /**
+ /*
* The number of free blocks below which the pause signal to
* class 1 of MAC #n is asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
- /**
+ /*
* The number of free blocks above which the pause signal to
* class 1 of MAC #n is de-asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
- /**
+ /*
* The number of free blocks below which the full signal to
* class 1 of MAC #n is asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
- /**
+ /*
* The number of free blocks above which the full signal to
* class 1 of MAC #n is de-asserted. n=0,1
- **/
+ */
REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
}
}
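The four thresholds form a hysteresis on the BRB free-block count: pause (or full) asserts when free blocks fall below the XOFF threshold and clears only once they climb back above the higher XON threshold. A minimal model, with illustrative names:

#include <stdbool.h>
#include <stdint.h>

/* Hysteresis model of one BRB class: the XON threshold sits above
 * XOFF so the pause signal does not chatter around one watermark.
 */
struct brb_class {
	uint32_t xoff_th;	/* assert below this */
	uint32_t xon_th;	/* de-assert above this (xon_th > xoff_th) */
	bool paused;
};

static void brb_update(struct brb_class *c, uint32_t free_blocks)
{
	if (!c->paused && free_blocks < c->xoff_th)
		c->paused = true;
	else if (c->paused && free_blocks > c->xon_th)
		c->paused = false;
}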
@@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
FEATURE_CONFIG_PFC_ENABLED;
DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
- /**
+ /*
* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
* MAC control frames (that are not pause packets)
* will be forwarded to the XCM.
@@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
xcm_mask = REG_RD(bp,
port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK);
- /**
+ /*
* nig params will override non PFC params, since it's possible to
* do transition from PFC to SAFC
*/
@@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *pfc_params)
{
- /**
+ /*
	 * PFC and pause are orthogonal to one another, meaning that when
	 * PFC is enabled, pause is disabled, and when PFC is
	 * disabled, pause is set according to the pause result.
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params,
static u8 bnx2x_bmac1_enable(struct link_params *params,
struct link_vars *vars,
- u8 is_lb)
+ u8 is_lb)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
/* XGXS control */
wb_data[0] = 0x3c;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
/* tx MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
params->mac_addr[5]);
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
/* mac control */
val = 0x3;
@@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
}
wb_data[0] = val;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
/* set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
/* set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
/* configure safc */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
- /* fix for emulation */
- if (CHIP_REV_IS_EMUL(bp)) {
- wb_data[0] = 0xf000;
- wb_data[1] = 0;
- REG_WR_DMAE(bp,
- bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
- wb_data, 2);
- }
-
return 0;
}
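Every BigMAC register touched in this function is 64 bits wide, which is why each write stages a two-dword wb_data pair for REG_WR_DMAE. A hypothetical helper (not in the driver) capturing the pattern the single-line cleanups above collapse onto:

/* Hypothetical wrapper over the driver's REG_WR_DMAE: low dword in
 * wb_data[0], high dword (unused for these registers) in wb_data[1].
 */
static void bmac_wr32(struct bnx2x *bp, u32 bmac_addr, u32 reg, u32 lo)
{
	u32 wb_data[2];

	wb_data[0] = lo;
	wb_data[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + reg, wb_data, 2);
}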
@@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[0] = 0;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
udelay(30);
/* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
wb_data[0] = 0x3c;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+ wb_data, 2);
udelay(30);
@@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[1] = ((params->mac_addr[0] << 8) |
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
- wb_data, 2);
+ wb_data, 2);
udelay(30);
@@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
- wb_data, 2);
+ wb_data, 2);
udelay(30);
/* set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
/* set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
/* set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
- REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
- wb_data, 2);
+ REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
udelay(30);
bnx2x_update_pfc_bmac2(params, vars, is_lb);
@@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
u32 val;
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
/* enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
struct bnx2x *bp = params->bp;
REG_WR(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[params->port].link_status),
- link_status);
+ offsetof(struct shmem_region,
+ port_mb[params->port].link_status), link_status);
}
static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
{
u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
- NIG_REG_INGRESS_BMAC0_MEM;
+ NIG_REG_INGRESS_BMAC0_MEM;
u32 wb_data[2];
u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
@@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
if (CHIP_IS_E2(bp)) {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
REG_WR_DMAE(bp, bmac_addr +
- BIGMAC2_REGISTER_BMAC_CONTROL,
- wb_data, 2);
+ BIGMAC2_REGISTER_BMAC_CONTROL,
+ wb_data, 2);
} else {
/* Clear Rx Enable bit in BMAC_CONTROL register */
REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
}
static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
- u32 line_speed)
+ u32 line_speed)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
/* update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
/* update init credit */
- init_crd = 778; /* (800-18-4) */
+ init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
return 0;
}
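The fixed credit above is plain arithmetic on the numbers in the source comment; spelled out (the meaning of the two deductions is inferred from the "(800-18-4)" note, not documented here):

#include <stdio.h>

/* 800 credit blocks minus the two reserved amounts from the source
 * comment gives the 778 used when no flow control applies.
 */
int main(void)
{
	printf("init_crd = %d\n", 800 - 18 - 4);	/* 778 */
	return 0;
}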
+/*
+ * bnx2x_get_emac_base
+ *
+ * @param bp
+ * @param mdc_mdio_access
+ * @param port
+ *
+ * @return u32
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on mdc_mdio_access, the port, and whether the
+ * ports are swapped. Each phy has a default access mode, which
+ * could also be overridden by nvram configuration. That setting,
+ * whether it is the default phy configuration or the nvram
+ * override, is passed here as mdc_mdio_access and selects the
+ * emac_base for the CL45 read/write operations.
+ */
static u32 bnx2x_get_emac_base(struct bnx2x *bp,
u32 mdc_mdio_access, u8 port)
{
@@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp,
}
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val)
+/******************************************************************/
+/* CL45 access functions */
+/******************************************************************/
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val)
{
u32 tmp, saved_mode;
u8 i, rc = 0;
-
- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
@@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
for (i = 0; i < 50; i++) {
udelay(10);
- tmp = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
/* data */
@@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
udelay(10);
tmp = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
udelay(5);
break;
@@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "write phy register failed\n");
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
}
}
@@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
return rc;
}
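The write above is the standard two-phase Clause 45 transaction: an address cycle, a bounded busy-poll, then a data cycle and a second poll. The poll itself reduces to this self-contained model, where the 50 x 10 us budget mirrors the code, the busy-bit position is illustrative, and read_comm/delay_us are stand-ins for the EMAC_REG_EMAC_MDIO_COMM read and udelay():

#include <stdint.h>

#define START_BUSY (1u << 29)	/* illustrative bit position */

static int mdio_wait_idle(uint32_t (*read_comm)(void),
			  void (*delay_us)(unsigned int))
{
	int i;

	for (i = 0; i < 50; i++) {
		delay_us(10);
		if (!(read_comm() & START_BUSY))
			return 0;	/* controller idle */
	}
	return -1;	/* caller logs the MDC/MDIO timeout, returns -EFAULT */
}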
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val)
{
u32 val, saved_mode;
u16 i;
u8 rc = 0;
-
- /* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+ /*
+ * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
* (a value of 49==0x31) and make sure that the AUTO poll is off
*/
saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
- EMAC_MDIO_MODE_CLOCK_CNT));
+ EMAC_MDIO_MODE_CLOCK_CNT));
val |= (EMAC_MDIO_MODE_CLAUSE_45 |
(49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
@@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
udelay(10);
val = REG_RD(bp, phy->mdio_ctrl +
- EMAC_REG_EMAC_MDIO_COMM);
+ EMAC_REG_EMAC_MDIO_COMM);
if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
break;
@@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+ netdev_err(bp->dev, "MDC/MDIO access timeout\n");
*ret_val = 0;
rc = -EFAULT;
}
@@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 *ret_val)
{
u8 phy_index;
- /**
+ /*
* Probe for the phy according to the given phy_addr, and execute
* the read request on it
*/
@@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val)
{
u8 phy_index;
- /**
+ /*
* Probe for the phy according to the given phy_addr, and execute
* the write request on it
*/
@@ -1576,16 +1545,15 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, aer_val);
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, aer_val);
}
static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, 0x3800);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0x3800);
}
/******************************************************************/
@@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
bnx2x_set_serdes_access(bp, port);
- REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
- port*0x10,
- DEFAULT_PHY_DEV_ADDR);
+ REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+ DEFAULT_PHY_DEV_ADDR);
}
static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
- port*0x18, 0);
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- params->phy[INT_PHY].def_md_devad);
+ params->phy[INT_PHY].def_md_devad);
}
void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g;
u8 port = params->port;
vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
@@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params,
vars->phy_link_up = 1;
vars->duplex = DUPLEX_FULL;
switch (vars->link_status &
- LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+ LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
case LINK_10THD:
vars->duplex = DUPLEX_HALF;
/* fall thru */
@@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 new_master_ln, ser_lane;
- ser_lane = ((params->lane_config &
+ ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
/* set the master_ln for AN */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
- &new_master_ln);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ &new_master_ln);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2 ,
- MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
- (new_master_ln | ser_lane));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2 ,
+ MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+ (new_master_ln | ser_lane));
}
static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
struct bnx2x *bp = params->bp;
u16 mii_control;
u16 i;
-
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
/* reset the unicore */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESET));
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
@@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
udelay(5);
/* the reset erased the previous bank value */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
udelay(5);
@@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params,
}
}
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
return -EINVAL;
@@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
struct bnx2x_phy *phy)
{
struct bnx2x *bp = params->bp;
- /* Each two bits represents a lane number:
- No swap is 0123 => 0x1b no need to enable the swap */
+ /*
+ * Each two bits represents a lane number:
+ * No swap is 0123 => 0x1b no need to enable the swap
+ */
u16 ser_lane, rx_lane_swap, tx_lane_swap;
ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
rx_lane_swap = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
tx_lane_swap = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+ PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
if (rx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_RX_LN_SWAP,
- (rx_lane_swap |
- MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
- MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+ (rx_lane_swap |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
}
if (tx_lane_swap != 0x1b) {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TX_LN_SWAP,
- (tx_lane_swap |
- MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+ (tx_lane_swap |
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
} else {
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
}
}
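The lane maps are four 2-bit lane numbers packed into one byte, so the identity mapping 0,1,2,3 encodes as 0x1b, which is why that value means "no swap". A standalone decode:

#include <stdint.h>
#include <stdio.h>

/* 0x1b = 00|01|10|11: reading fields from the top, lanes 0,1,2,3. */
int main(void)
{
	uint8_t map = 0x1b;
	int i;

	for (i = 3; i >= 0; i--)
		printf("field %d -> lane %u\n", 3 - i,
		       (unsigned)((map >> (2 * i)) & 0x3));
	return 0;
}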
@@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 control2;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
- &control2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ &control2);
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
phy->speed_cap_mask, control2);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
- control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+ control2);
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
(phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
DP(NETIF_MSG_LINK, "XGXS\n");
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
- &control2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ &control2);
control2 |=
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
- control2);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+ control2);
/* Disable parallel detection of HiG */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_XGXS_BLOCK2,
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
- MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_XGXS_BLOCK2,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+ MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
}
}
static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars,
- u8 enable_cl73)
+ struct link_vars *vars,
+ u8 enable_cl73)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* CL37 Autoneg */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
/* CL37 Autoneg Enabled */
if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
/* Enable/Disable Autodetection */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
else
reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
/* Enable TetonII and BAM autoneg */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
&reg_val);
if (vars->line_speed == SPEED_AUTO_NEG) {
/* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
}
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_BAM_NEXT_PAGE,
- MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
- reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_BAM_NEXT_PAGE,
+ MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+ reg_val);
if (enable_cl73) {
/* Enable Cl73 FSM status bits */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_UCTRL,
- 0xe);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_UCTRL,
+ 0xe);
/* Enable BAM Station Manager*/
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_CL73_USERB0,
MDIO_CL73_USERB0_CL73_BAM_CTRL1,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
/* Advertise CL73 link speeds */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2,
- &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ &reg_val);
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV2,
- reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV2,
+ reg_val);
/* CL73 Autoneg Enabled */
reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
} else /* CL73 Autoneg Disabled */
reg_val = 0;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
/* program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 reg_val;
/* program duplex, disable autoneg and sgmii*/
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
if (phy->req_duplex == DUPLEX_FULL)
reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
-
- /* program speed
- - needed only if the speed is greater than 1G (2.5G or 10G) */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+ /*
+ * program speed
+ * - needed only if the speed is greater than 1G (2.5G or 10G)
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, &reg_val);
/* clearing the speed value before setting the right speed */
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
@@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
}
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_MISC1, reg_val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_MISC1, reg_val);
}
@@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy,
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= MDIO_OVER_1G_UP1_10G;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_UP1, val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP1, val);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_UP3, 0x400);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_UP3, 0x400);
}
static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
- /* resolve pause mode and advertisement
- * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+ /*
+ * Resolve pause mode and advertisement.
+ * Please refer to Table 28B-3 of the 802.3ab-1999 spec
+ */
switch (phy->req_flow_ctrl) {
case BNX2X_FLOW_CTRL_AUTO:
- if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
- *ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
- } else {
+ if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+ else
*ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
- }
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case BNX2X_FLOW_CTRL_TX:
- *ieee_fc |=
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+ *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
break;
case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
struct link_params *params,
- u16 ieee_fc)
+ u16 ieee_fc)
{
struct bnx2x *bp = params->bp;
u16 val;
/* for AN, we are always publishing full duplex */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1, &val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, &val);
val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1, val);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1, val);
}
static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
/* Enable and restart BAM/CL37 aneg */
if (enable_cl73) {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- &mii_control);
-
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- (mii_control |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ &mii_control);
+
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ (mii_control |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
} else {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
DP(NETIF_MSG_LINK,
"bnx2x_restart_autoneg mii_control before = 0x%x\n",
mii_control);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- (mii_control |
- MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
- MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ (mii_control |
+ MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+ MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
}
}
static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u16 control1;
/* in SGMII mode, the unicore is always slave */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
- &control1);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ &control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
/* set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
- control1);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+ control1);
/* if forced speed */
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
/* set speed, disable autoneg */
u16 mii_control;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- &mii_control);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ &mii_control);
mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_MII_CONTROL,
- mii_control);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_MII_CONTROL,
+ mii_control);
} else { /* AN mode */
/* enable and restart AN */
@@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
{ /* LD LP */
- switch (pause_result) { /* ASYM P ASYM P */
- case 0xb: /* 1 0 1 1 */
+ switch (pause_result) { /* ASYM P ASYM P */
+ case 0xb: /* 1 0 1 1 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
break;
- case 0xe: /* 1 1 1 0 */
+ case 0xe: /* 1 1 1 0 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
break;
- case 0x5: /* 0 1 0 1 */
- case 0x7: /* 0 1 1 1 */
- case 0xd: /* 1 1 0 1 */
- case 0xf: /* 1 1 1 1 */
+ case 0x5: /* 0 1 0 1 */
+ case 0x7: /* 0 1 1 1 */
+ case 0xd: /* 1 1 0 1 */
+ case 0xf: /* 1 1 1 1 */
vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
break;
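pause_result is a nibble holding the local-device (LD) and link-partner (LP) ASYM/PAUSE advertisement bits, bit 3 down to bit 0, which is what the column comment on the switch encodes. Decoding the first case:

#include <stdint.h>
#include <stdio.h>

/* 0xb = LD_ASYM=1, LD_P=0, LP_ASYM=1, LP_P=1: per 802.3 Table 28B-3
 * this resolves to transmit-only pause (BNX2X_FLOW_CTRL_TX).
 */
int main(void)
{
	uint8_t pr = 0xb;

	printf("LD: ASYM=%u P=%u  LP: ASYM=%u P=%u\n",
	       (pr >> 3) & 1u, (pr >> 2) & 1u, (pr >> 1) & 1u, pr & 1u);
	return 0;
}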
@@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
u16 pd_10g, status2_1000x;
if (phy->req_line_speed != SPEED_AUTO_NEG)
return 0;
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
- &status2_1000x);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_SERDES_DIGITAL,
- MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
- &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_SERDES_DIGITAL,
+ MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+ &status2_1000x);
if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
params->port);
return 1;
}
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_10G_PARALLEL_DETECT,
- MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
- &pd_10g);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_10G_PARALLEL_DETECT,
+ MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+ &pd_10g);
if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1,
- &ld_pause);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_LP_ADV1,
- &lp_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
>> 8;
@@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
pause_result);
} else {
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
- &ld_pause);
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
- &lp_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
pause_result = (ld_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
pause_result |= (lp_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
pause_result);
}
@@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
u16 rx_status, ustat_val, cl37_fsm_recieved;
DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
/* Step 1: Make sure signal is detected */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_RX0,
- MDIO_RX0_RX_STATUS,
- &rx_status);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_RX0,
+ MDIO_RX0_RX_STATUS,
+ &rx_status);
if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
(MDIO_RX0_RX_STATUS_SIGDET)) {
DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
"rx_status(0x80b0) = 0x%x\n", rx_status);
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
return;
}
/* Step 2: Check CL73 state machine */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_USERB0,
- MDIO_CL73_USERB0_CL73_USTAT1,
- &ustat_val);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_USERB0,
+ MDIO_CL73_USERB0_CL73_USTAT1,
+ &ustat_val);
if ((ustat_val &
(MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
"ustat_val(0x8371) = 0x%x\n", ustat_val);
return;
}
- /* Step 3: Check CL37 Message Pages received to indicate LP
- supports only CL37 */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_REMOTE_PHY,
- MDIO_REMOTE_PHY_MISC_RX_STATUS,
- &cl37_fsm_recieved);
+ /*
+ * Step 3: Check CL37 Message Pages received to indicate LP
+ * supports only CL37
+ */
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_REMOTE_PHY,
+ MDIO_REMOTE_PHY_MISC_RX_STATUS,
+ &cl37_fsm_recieved);
if ((cl37_fsm_recieved &
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
cl37_fsm_recieved);
return;
}
- /* The combined cl37/cl73 fsm state information indicating that we are
- connected to a device which does not support cl73, but does support
- cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
+ /*
+	 * The combined cl37/cl73 fsm state information indicates that
+ * we are connected to a device which does not support cl73, but
+ * does support cl37 BAM. In this case we disable cl73 and
+ * restart cl37 auto-neg
+ */
+
/* Disable CL73 */
- CL45_WR_OVER_CL22(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB0,
- MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
- 0);
+ CL22_WR_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB0,
+ MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+ 0);
/* Restart CL37 autoneg */
bnx2x_restart_autoneg(phy, params, 0);
DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
@@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 new_line_speed , gp_status;
+ u16 new_line_speed, gp_status;
u8 rc = 0;
/* Read gp_status */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
if (phy->req_line_speed == SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
u16 bank;
/* read precomp */
- CL45_RD_OVER_CL22(bp, phy,
- MDIO_REG_BANK_OVER_1G,
- MDIO_OVER_1G_LP_UP2, &lp_up2);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP2, &lp_up2);
/* bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
- CL45_RD_OVER_CL22(bp, phy,
- bank,
- MDIO_TX0_TX_DRIVER, &tx_driver);
+ CL22_RD_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, &tx_driver);
/* replace tx_driver bits [15:12] */
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
tx_driver |= lp_up2;
- CL45_WR_OVER_CL22(bp, phy,
- bank,
- MDIO_TX0_TX_DRIVER, tx_driver);
+ CL22_WR_OVER_CL45(bp, phy,
+ bank,
+ MDIO_TX0_TX_DRIVER, tx_driver);
}
}
}
@@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params,
DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
- EMAC_REG_EMAC_MODE,
- (EMAC_MODE_25G_MODE |
- EMAC_MODE_PORT_MII_10M |
- EMAC_MODE_HALF_DUPLEX));
+ EMAC_REG_EMAC_MODE,
+ (EMAC_MODE_25G_MODE |
+ EMAC_MODE_PORT_MII_10M |
+ EMAC_MODE_HALF_DUPLEX));
switch (vars->line_speed) {
case SPEED_10:
mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params,
if (vars->duplex == DUPLEX_HALF)
mode |= EMAC_MODE_HALF_DUPLEX;
bnx2x_bits_en(bp,
- GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
- mode);
+ GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+ mode);
bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
return 0;
@@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_RX0_RX_EQ_BOOST,
phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
- CL45_WR_OVER_CL22(bp, phy,
+ CL22_WR_OVER_CL45(bp, phy,
bank,
MDIO_TX0_TX_DRIVER,
phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
/* forced speed requested? */
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
- params->loopback_mode == LOOPBACK_EXT)) {
+ params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
/* disable autoneg */
@@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy,
/* program duplex & pause advertisement (for aneg) */
bnx2x_set_ieee_aneg_advertisment(phy, params,
- vars->ieee_fc);
+ vars->ieee_fc);
/* enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy,
}
static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
- struct bnx2x_phy *phy)
+ struct bnx2x_phy *phy,
+ struct link_params *params)
{
u16 cnt, ctrl;
/* Wait for soft reset to get cleared upto 1 sec */
@@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
break;
msleep(1);
}
+
+ if (cnt == 1000)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ params->port);
DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
return cnt;
}
@@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params)
u32 mask;
struct bnx2x *bp = params->bp;
- /* setting the status to report on link up
- for either XGXS or SerDes */
-
+ /* Setting the status to report on link up for either XGXS or SerDes */
if (params->switch_cfg == SWITCH_CFG_10G) {
mask = (NIG_MASK_XGXS0_LINK10G |
NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
{
u32 latch_status = 0;
- /**
+ /*
* Disable the MI INT ( external phy int ) by writing 1 to the
* status register. Link down indication is high-active-signal,
* so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
/* For all latched-signal=up : Re-Arm Latch signals */
REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
- (latch_status & 0xfffe) | (latch_status & 1));
+ (latch_status & 0xfffe) | (latch_status & 1));
}
	/* For all latched-signal=up, write original_signal to status */
}
static void bnx2x_link_int_ack(struct link_params *params,
- struct link_vars *vars, u8 is_10g)
+ struct link_vars *vars, u8 is_10g)
{
struct bnx2x *bp = params->bp;
u8 port = params->port;
- /* first reset all status
- * we assume only one line will be change at a time */
+ /*
+	 * First reset all status; we assume only one line will
+	 * change at a time
+ */
bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
- (NIG_STATUS_XGXS0_LINK10G |
- NIG_STATUS_XGXS0_LINK_STATUS |
- NIG_STATUS_SERDES0_LINK_STATUS));
+ (NIG_STATUS_XGXS0_LINK10G |
+ NIG_STATUS_XGXS0_LINK_STATUS |
+ NIG_STATUS_SERDES0_LINK_STATUS));
if (vars->phy_link_up) {
if (is_10g) {
- /* Disable the 10G link interrupt
- * by writing 1 to the status register
+ /*
+ * Disable the 10G link interrupt by writing 1 to the
+ * status register
*/
DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
NIG_STATUS_XGXS0_LINK10G);
} else if (params->switch_cfg == SWITCH_CFG_10G) {
- /* Disable the link interrupt
- * by writing 1 to the relevant lane
- * in the status register
+ /*
+ * Disable the link interrupt by writing 1 to the
+ * relevant lane in the status register
*/
u32 ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params *params,
} else { /* SerDes */
DP(NETIF_MSG_LINK, "SerDes phy link up\n");
- /* Disable the link interrupt
- * by writing 1 to the status register
+ /*
+ * Disable the link interrupt by writing 1 to the status
+ * register
*/
bnx2x_bits_en(bp,
NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
}
if ((params->num_phys == MAX_PHYS) &&
(params->phy[EXT_PHY2].ver_addr != 0)) {
- spirom_ver = REG_RD(bp,
- params->phy[EXT_PHY2].ver_addr);
+ spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
if (params->phy[EXT_PHY2].format_fw_ver) {
*ver_p = '/';
ver_p++;
@@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
/* change the uni_phy_addr in the nig */
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
- port*0x18));
+ port*0x18));
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
bnx2x_cl45_write(bp, phy,
- 5,
- (MDIO_REG_BANK_AER_BLOCK +
- (MDIO_AER_BLOCK_AER_REG & 0xf)),
- 0x2800);
+ 5,
+ (MDIO_REG_BANK_AER_BLOCK +
+ (MDIO_AER_BLOCK_AER_REG & 0xf)),
+ 0x2800);
bnx2x_cl45_write(bp, phy,
- 5,
- (MDIO_REG_BANK_CL73_IEEEB0 +
- (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
- 0x6041);
+ 5,
+ (MDIO_REG_BANK_CL73_IEEEB0 +
+ (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+ 0x6041);
msleep(200);
/* set aer mmd back */
bnx2x_set_aer_mmd_xgxs(params, phy);
/* and md_devad */
- REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
- md_devad);
-
+ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
} else {
u16 mii_ctrl;
DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,26 +3134,26 @@ u8 bnx2x_set_led(struct link_params *params,
case LED_MODE_OFF:
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- SHARED_HW_CFG_LED_MAC1);
+ SHARED_HW_CFG_LED_MAC1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
break;
case LED_MODE_OPER:
- /**
+ /*
 * For all other phys, OPER mode is the same as ON, so in case
* link is down, do nothing
- **/
+ */
if (!vars->link_up)
break;
case LED_MODE_ON:
if (params->phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
CHIP_IS_E2(bp) && params->num_phys == 2) {
- /**
- * This is a work-around for E2+8727 Configurations
- */
+ /*
+ * This is a work-around for E2+8727 Configurations
+ */
if (mode == LED_MODE_ON ||
speed == SPEED_10000){
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -3183,41 +3165,40 @@ u8 bnx2x_set_led(struct link_params *params,
return rc;
}
} else if (SINGLE_MEDIA_DIRECT(params)) {
- /**
- * This is a work-around for HW issue found when link
- * is up in CL73
- */
+ /*
+ * This is a work-around for HW issue found when link
+ * is up in CL73
+ */
REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
} else {
- REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
- hw_led_mode);
+ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
}
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
- port*4, 0);
+ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
/* Set blinking rate to ~15.9Hz */
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
- LED_BLINK_RATE_VAL);
+ LED_BLINK_RATE_VAL);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
- port*4, 1);
+ port*4, 1);
tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
- EMAC_WR(bp, EMAC_REG_EMAC_LED,
- (tmp & (~EMAC_LED_OVERRIDE)));
+ EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
if (CHIP_IS_E1(bp) &&
((speed == SPEED_2500) ||
(speed == SPEED_1000) ||
(speed == SPEED_100) ||
(speed == SPEED_10))) {
- /* On Everest 1 Ax chip versions for speeds less than
- 10G LED scheme is different */
+ /*
+ * On Everest 1 Ax chip versions for speeds less than
+ * 10G LED scheme is different
+ */
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
+ + port*4, 1);
REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
- port*4, 0);
+ port*4, 0);
REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
- port*4, 1);
+ port*4, 1);
}
break;
@@ -3231,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params,
}
-/**
+/*
 * This function reflects the actual link state, read DIRECTLY from the
* HW
*/
@@ -3243,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars,
u8 ext_phy_link_up = 0, serdes_phy_type;
struct link_vars temp_vars;
- CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
- MDIO_REG_BANK_GP_STATUS,
- MDIO_GP_STATUS_TOP_AN_STATUS1,
- &gp_status);
+ CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
/* link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
@@ -3290,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params,
u8 rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
- /**
- * In case of external phy existence, the line speed would be the
- * line speed linked up by the external phy. In case it is direct
- * only, then the line_speed during initialization will be
- * equal to the req_line_speed
- */
+ /*
+ * In case of external phy existence, the line speed would be the
+ * line speed linked up by the external phy. In case it is direct
+ * only, then the line_speed during initialization will be
+ * equal to the req_line_speed
+ */
vars->line_speed = params->phy[INT_PHY].req_line_speed;
- /**
+ /*
* Initialize the internal phy in case this is a direct board
 * (no external phys), or this board has an external phy which needs
 * to be initialized first.
@@ -3326,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params,
if (!non_ext_phy)
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
- /**
+ /*
* No need to initialize second phy in case of first
* phy only selection. In case of second phy, we do
* need to initialize the first phy, since they are
* connected.
- **/
+ */
if (phy_index == EXT_PHY2 &&
(bnx2x_phy_selection(params) ==
PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
- DP(NETIF_MSG_LINK, "Not initializing"
- "second phy\n");
+ DP(NETIF_MSG_LINK, "Ignoring second phy\n");
continue;
}
params->phy[phy_index].config_init(
@@ -3358,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
/* reset the SerDes/XGXS */
- REG_WR(params->bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_3_CLEAR,
- (0x1ff << (params->port*16)));
+ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+ (0x1ff << (params->port*16)));
}
static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3374,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
else
gpio_port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
DP(NETIF_MSG_LINK, "reset external PHY\n");
}
@@ -3409,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params,
/* reset BigMac */
bnx2x_bmac_rx_disable(bp, params->port);
- REG_WR(bp, GRCBASE_MISC +
- MISC_REGISTERS_RESET_REG_2_CLEAR,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+ (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
return 0;
}
@@ -3462,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params,
msleep(20);
return rc;
}
-/**
+/*
* The bnx2x_link_update function should be called upon link
* interrupt.
* Link is considered up as follows:
@@ -3501,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
- port*0x18) > 0);
+ port*0x18) > 0);
DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
is_mi_int,
- REG_RD(bp,
- NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+ REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3515,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
/* disable emac */
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- /**
- * Step 1:
- * Check external link change only for external phys, and apply
- * priority selection between them in case the link on both phys
- * is up. Note that the instead of the common vars, a temporary
- * vars argument is used since each phy may have different link/
- * speed/duplex result
- */
+ /*
+ * Step 1:
+ * Check external link change only for external phys, and apply
+ * priority selection between them in case the link on both phys
+ * is up. Note that instead of the common vars, a temporary
+ * vars argument is used since each phy may have different link/
+ * speed/duplex result
+ */
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3547,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
switch (bnx2x_phy_selection(params)) {
case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
- /**
+ /*
* In this option, the first PHY makes sure to pass the
* traffic through itself only.
 * It's not clear how to reset the link on the second phy
- **/
+ */
active_external_phy = EXT_PHY1;
break;
case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
- /**
+ /*
* In this option, the first PHY makes sure to pass the
* traffic through the second PHY.
- **/
+ */
active_external_phy = EXT_PHY2;
break;
default:
- /**
+ /*
* Link indication on both PHYs with the following cases
* is invalid:
* - FIRST_PHY means that second phy wasn't initialized,
@@ -3570,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
* - SECOND_PHY means that first phy should not be able
* to link up by itself (using configuration)
 * - DEFAULT should be overridden during initialization
- **/
+ */
DP(NETIF_MSG_LINK, "Invalid link indication"
"mpc=0x%x. DISABLING LINK !!!\n",
params->multi_phy_config);
@@ -3580,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
}
}
prev_line_speed = vars->line_speed;
- /**
- * Step 2:
- * Read the status of the internal phy. In case of
- * DIRECT_SINGLE_MEDIA board, this link is the external link,
- * otherwise this is the link between the 577xx and the first
- * external phy
- */
+ /*
+ * Step 2:
+ * Read the status of the internal phy. In case of
+ * DIRECT_SINGLE_MEDIA board, this link is the external link,
+ * otherwise this is the link between the 577xx and the first
+ * external phy
+ */
if (params->phy[INT_PHY].read_status)
params->phy[INT_PHY].read_status(
&params->phy[INT_PHY],
params, vars);
- /**
+ /*
 * The INT_PHY flow control resides in the vars. This includes the
* case where the speed or flow control are not set to AUTO.
* Otherwise, the active external phy flow control result is set
@@ -3601,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
*/
if (active_external_phy > INT_PHY) {
vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
- /**
+ /*
* Link speed is taken from the XGXS. AN and FC result from
* the external phy.
*/
vars->link_status |= phy_vars[active_external_phy].link_status;
- /**
+ /*
 * if active_external_phy is the first PHY and link is up, disable
 * TX on the second external PHY
*/
@@ -3643,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
" ext_phy_line_speed = %d\n", vars->flow_ctrl,
vars->link_status, ext_phy_line_speed);
- /**
+ /*
+ * Upon link speed change, set the NIG into drain mode. This deals
+ * with a possible FIFO glitch due to the clk change when speed is
+ * decreased without a link-down indicator
@@ -3658,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
ext_phy_line_speed);
vars->phy_link_up = 0;
} else if (prev_line_speed != vars->line_speed) {
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+ 0);
msleep(1);
}
}
@@ -3674,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
bnx2x_link_int_ack(params, vars, link_10g);
- /**
- * In case external phy link is up, and internal link is down
- * (not initialized yet probably after link initialization, it
- * needs to be initialized.
- * Note that after link down-up as result of cable plug, the xgxs
- * link would probably become up again without the need
- * initialize it
- */
+ /*
+ * In case the external phy link is up and the internal link is
+ * down (probably not initialized yet after link initialization),
+ * the internal phy needs to be initialized.
+ * Note that after a link down-up as a result of a cable plug, the
+ * xgxs link would probably come up again without the need to
+ * initialize it
+ */
if (!(SINGLE_MEDIA_DIRECT(params))) {
DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
" init_preceding = %d\n", ext_phy_link_up,
@@ -3701,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars);
}
}
- /**
- * Link is up only if both local phy and external phy (in case of
- * non-direct board) are up
+ /*
+ * Link is up only if both local phy and external phy (in case of
+ * non-direct board) are up
*/
vars->link_up = (vars->phy_link_up &&
(ext_phy_link_up ||
@@ -3724,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
msleep(1);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3747,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
u16 fw_ver1, fw_ver2;
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+ MDIO_PMA_REG_ROM_VER1, &fw_ver1);
bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+ MDIO_PMA_REG_ROM_VER2, &fw_ver2);
bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
phy->ver_addr);
}
@@ -3770,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
- val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+ val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
}
if ((vars->ieee_fc &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3801,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
ret = 1;
bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
pause_result = (ld_pause &
MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
@@ -3881,31 +3858,31 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
/* Boot port from external ROM */
/* EDC grst */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x0001);
/* ucode reboot and rst */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- 0x008c);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ 0x008c);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
/* Reset internal microprocessor */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
/* Release srst bit */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
/* Delay 100ms per the PHY specifications */
msleep(100);
@@ -3936,8 +3913,8 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
/* Clear ser_boot_ctl bit */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
bnx2x_save_bcm_spirom_ver(bp, phy, port);
DP(NETIF_MSG_LINK,
@@ -3958,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
/* Read 8073 HW revision*/
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val != 1) {
/* No need to workaround in 8073 A1 */
@@ -3967,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
}
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2, &val);
/* SNR should be applied only for version 0x102 */
if (val != 0x102)
@@ -3982,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
u16 val, cnt, cnt1 ;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_CHIP_REV, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_CHIP_REV, &val);
if (val > 0) {
/* No need to workaround in 8073 A1 */
@@ -3991,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
}
/* XAUI workaround in 8073 A0: */
- /* After loading the boot ROM and restarting Autoneg,
- poll Dev1, Reg $C820: */
+ /*
+ * After loading the boot ROM and restarting Autoneg, poll
+ * Dev1, Reg $C820:
+ */
for (cnt = 0; cnt < 1000; cnt++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
- &val);
- /* If bit [14] = 0 or bit [13] = 0, continue on with
- system initialization (XAUI work-around not required,
- as these bits indicate 2.5G or 1G link up). */
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+ &val);
+ /*
+ * If bit [14] = 0 or bit [13] = 0, continue on with
+ * system initialization (XAUI work-around not required, as
+ * these bits indicate 2.5G or 1G link up).
+ */
if (!(val & (1<<14)) || !(val & (1<<13))) {
DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
return 0;
} else if (!(val & (1<<15))) {
- DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
- /* If bit 15 is 0, then poll Dev1, Reg $C841 until
- it's MSB (bit 15) goes to 1 (indicating that the
- XAUI workaround has completed),
- then continue on with system initialization.*/
+ DP(NETIF_MSG_LINK, "bit 15 went off\n");
+ /*
+ * If bit 15 is 0, then poll Dev1, Reg $C841 until its
+ * MSB (bit 15) goes to 1 (indicating that the XAUI
+ * workaround has completed), then continue on with
+ * system initialization.
+ */
for (cnt1 = 0; cnt1 < 1000; cnt1++) {
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -4093,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
gpio_port = params->port;
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
/* enable LASI */
bnx2x_cl45_write(bp, phy,
@@ -4114,10 +4097,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
- /**
- * If this is forced speed, set to KR or KX (all other are not
- * supported)
- */
/* Swap polarity if required - Must be done only in non-1G mode */
if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
/* Configure the 8073 to swap _P and _N of the KR lines */
@@ -4160,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
val = (1<<7);
} else if (phy->req_line_speed == SPEED_2500) {
val = (1<<5);
- /* Note that 2.5G works only
- when used with 1G advertisment */
+ /*
+ * Note that 2.5G works only when used with 1G
+ * advertisement
+ */
} else
val = (1<<5);
} else {
@@ -4170,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= (1<<7);
- /* Note that 2.5G works only when
- used with 1G advertisment */
+ /* Note that 2.5G works only when used with 1G advertisement */
if (phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4211,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
/* Add support for CL37 (passive mode) III */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
- /* The SNR will improve about 2db by changing
- BW and FEE main tap. Rest commands are executed
- after link is up*/
+ /*
+ * The SNR will improve by about 2dB by changing the BW and FFE
+ * main tap. The remaining commands are executed after link is up.
+ * Change FFE main cursor to 5 in the EDC register
+ */
if (bnx2x_8073_is_snr_needed(bp, phy))
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4297,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
- /* The SNR will improve about 2dbby
- changing the BW and FEE main tap.*/
- /* The 1st write to change FFE main
- tap is set before restart AN */
- /* Change PLL Bandwidth in EDC
- register */
+ /*
+ * The SNR will improve by about 2dB by changing the BW and FFE
+ * main tap. The 1st write to change the FFE main tap is set before
+ * restarting AN. Change PLL Bandwidth in the EDC register
+ */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
0x26BC);
@@ -4346,10 +4327,10 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_XS_DEVAD,
MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
- /**
- * Set bit 3 to invert Rx in 1G mode and clear this bit
- * when it`s in 10G mode.
- */
+ /*
+ * Set bit 3 to invert Rx in 1G mode and clear this bit
+ * when it`s in 10G mode.
+ */
if (vars->line_speed == SPEED_1000) {
DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
"the 8073\n");
@@ -4381,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
gpio_port);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- gpio_port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ gpio_port);
}
/******************************************************************/
@@ -4396,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "init 8705\n");
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4451,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
/******************************************************************/
/* SFP+ module Section */
/******************************************************************/
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+ u8 gpio_port;
+ u32 swap_val, swap_override;
+ struct bnx2x *bp = params->bp;
+ if (CHIP_IS_E2(bp))
+ gpio_port = BP_PATH(bp);
+ else
+ gpio_port = params->port;
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ return gpio_port ^ (swap_val && swap_override);
+}
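
Note that swap_val && swap_override is a logical AND, so the final XOR flips the GPIO port only when both the NIG port-swap value and the strap override are non-zero. A small illustration of the mapping (sketch only):

	/* gpio_port ^ (swap_val && swap_override):
	 *   swap_val == 0 or swap_override == 0   -> gpio_port unchanged
	 *   swap_val != 0 and swap_override != 0  -> gpio_port flipped (0<->1)
	 */
	u8 flip = (swap_val && swap_override);	/* 0 or 1, never a bitmask */
	u8 effective_port = gpio_port ^ flip;
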
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
struct bnx2x_phy *phy,
- u8 port,
u8 tx_en)
{
u16 val;
+ u8 port = params->port;
+ struct bnx2x *bp = params->bp;
+ u32 tx_en_mode;
- DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
- tx_en, port);
/* Disable/Enable transmitter (TX laser of the SFP+ module) */
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &val);
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].sfp_ctrl)) &
+ PORT_HW_CFG_TX_LASER_MASK;
+ DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+ "mode = %x\n", tx_en, port, tx_en_mode);
+ switch (tx_en_mode) {
+ case PORT_HW_CFG_TX_LASER_MDIO:
- if (tx_en)
- val &= ~(1<<15);
- else
- val |= (1<<15);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- val);
+ if (tx_en)
+ val &= ~(1<<15);
+ else
+ val |= (1<<15);
+
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ val);
+ break;
+ case PORT_HW_CFG_TX_LASER_GPIO0:
+ case PORT_HW_CFG_TX_LASER_GPIO1:
+ case PORT_HW_CFG_TX_LASER_GPIO2:
+ case PORT_HW_CFG_TX_LASER_GPIO3:
+ {
+ u16 gpio_pin;
+ u8 gpio_port, gpio_mode;
+ if (tx_en)
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+ else
+ gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+ gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+ gpio_port = bnx2x_get_gpio_port(params);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ break;
+ }
+ default:
+ DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+ break;
+ }
}
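
Callers now pass link_params instead of a raw (bp, port) pair, letting the function read the per-port sfp_ctrl word from shmem and decide between MDIO and GPIO laser control itself. A hedged usage sketch, mirroring the DISABLE_TX/ENABLE_TX call sites later in this patch:

	/* Keep the TX laser off for an unapproved module, on otherwise;
	 * the MDIO-vs-GPIO choice is resolved inside the helper. */
	if (phy->flags & FLAGS_SFP_NOT_APPROVED)
		bnx2x_sfp_set_transmitter(params, phy, 0);
	else
		bnx2x_sfp_set_transmitter(params, phy, 1);
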
static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -4492,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- (byte_cnt | 0xa000));
+ (byte_cnt | 0xa000));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
- addr);
+ addr);
/* Activate read command */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- 0x2c0f);
+ 0x2c0f);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
break;
@@ -4526,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
@@ -4545,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u16 addr, u8 byte_cnt, u8 *o_buf)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -4558,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Need to read from 1.8000 to clear it */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ &val);
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- ((byte_cnt < 2) ? 2 : byte_cnt));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+ ((byte_cnt < 2) ? 2 : byte_cnt));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
- addr);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+ addr);
/* Set the destination address */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- 0x8004,
- MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+ MDIO_PMA_DEVAD,
+ 0x8004,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
/* Activate read command */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
- 0x8002);
- /* Wait appropriate time for two-wire command to finish before
- polling the status register */
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+ 0x8002);
+ /*
+ * Wait appropriate time for two-wire command to finish before
+ * polling the status register
+ */
msleep(1);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
break;
@@ -4604,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK,
"Got bad status 0x%x when reading from SFP+ EEPROM\n",
(val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
- return -EINVAL;
+ return -EFAULT;
}
/* Read the buffer */
for (i = 0; i < byte_cnt; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
}
for (i = 0; i < 100; i++) {
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
@@ -4628,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
+ byte_cnt, o_buf);
else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
+ byte_cnt, o_buf);
return -EINVAL;
}
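
With the static qualifier dropped, the EEPROM reader can be called from the rest of the driver. A minimal sketch of fetching the vendor-name field, using the SFP_EEPROM_* constants that appear elsewhere in this file:

	u8 vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE + 1];

	/* Returns 0 on success; dispatches to the 8726 or 8727 two-wire
	 * engine based on phy->type. */
	if (bnx2x_read_sfp_module_eeprom(phy, params,
					 SFP_EEPROM_VENDOR_NAME_ADDR,
					 SFP_EEPROM_VENDOR_NAME_SIZE,
					 vendor_name) == 0)
		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
	else
		vendor_name[0] = '\0';	/* read failed, leave the name empty */
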
static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
struct link_params *params,
- u16 *edc_mode)
+ u16 *edc_mode)
{
struct bnx2x *bp = params->bp;
u8 val, check_limiting_mode = 0;
@@ -4664,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
u8 copper_module_type;
- /* Check if its active cable( includes SFP+ module)
- of passive cable*/
+ /*
+ * Check if it's an active cable (includes SFP+ module)
+ * or a passive cable
+ */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4724,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
return 0;
}
-/* This function read the relevant field from the module ( SFP+ ),
- and verify it is compliant with this board */
+/*
+ * This function reads the relevant field from the module (SFP+) and verifies
+ * it is compliant with this board
+ */
static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -4774,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
/* format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
- SFP_EEPROM_VENDOR_NAME_ADDR,
- SFP_EEPROM_VENDOR_NAME_SIZE,
- (u8 *)vendor_name))
+ SFP_EEPROM_VENDOR_NAME_ADDR,
+ SFP_EEPROM_VENDOR_NAME_SIZE,
+ (u8 *)vendor_name))
vendor_name[0] = '\0';
else
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
if (bnx2x_read_sfp_module_eeprom(phy,
params,
- SFP_EEPROM_PART_NO_ADDR,
- SFP_EEPROM_PART_NO_SIZE,
- (u8 *)vendor_pn))
+ SFP_EEPROM_PART_NO_ADDR,
+ SFP_EEPROM_PART_NO_SIZE,
+ (u8 *)vendor_pn))
vendor_pn[0] = '\0';
else
vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
- netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
- " Port %d from %s part number %s\n",
- params->port, vendor_name, vendor_pn);
+ netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected,"
+ " Port %d from %s part number %s\n",
+ params->port, vendor_name, vendor_pn);
phy->flags |= FLAGS_SFP_NOT_APPROVED;
return -EINVAL;
}
@@ -4803,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
u8 val;
struct bnx2x *bp = params->bp;
u16 timeout;
- /* Initialization time after hot-plug may take up to 300ms for some
- phys type ( e.g. JDSU ) */
+ /*
+ * Initialization time after hot-plug may take up to 300ms for
+ * some phy types (e.g. JDSU)
+ */
+
for (timeout = 0; timeout < 60; timeout++) {
if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
== 0) {
@@ -4823,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
/* Make sure GPIOs are not using for LED mode */
u16 val;
/*
- * In the GPIO register, bit 4 is use to detemine if the GPIOs are
+ * In the GPIO register, bit 4 is used to determine if the GPIOs are
* operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
* output
* Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
* Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
 * where the 1st bit is the over-current (input only), and the 2nd bit is
 * for power (output only)
- */
-
- /*
+ *
* In case of NOC feature is disabled and power is up, set GPIO control
* as input to enable listening of over-current indication
*/
@@ -4861,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
u16 cur_limiting_mode;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- &cur_limiting_mode);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &cur_limiting_mode);
DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
cur_limiting_mode);
if (edc_mode == EDC_MODE_LIMITING) {
- DP(NETIF_MSG_LINK,
- "Setting LIMITING MODE\n");
+ DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
@@ -4878,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
- /* Changing to LRM mode takes quite few seconds.
- So do it only if current mode is limiting
- ( default is LRM )*/
+ /*
+ * Changing to LRM mode takes quite a few seconds. So do it only
+ * if current mode is limiting (default is LRM)
+ */
if (cur_limiting_mode != EDC_MODE_LIMITING)
return 0;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LRM_MODE,
- 0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- 0x128);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ 0x128);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL0,
- 0x4008);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL0,
+ 0x4008);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_LRM_MODE,
- 0xaaaa);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_LRM_MODE,
+ 0xaaaa);
}
return 0;
}
static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
struct bnx2x_phy *phy,
- u16 edc_mode)
+ u16 edc_mode)
{
u16 phy_identifier;
u16 rom_ver2_val;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- &phy_identifier);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ &phy_identifier);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- (phy_identifier & ~(1<<9)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier & ~(1<<9)));
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- &rom_ver2_val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ &rom_ver2_val);
/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_ROM_VER2,
- (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_ROM_VER2,
+ (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER,
- (phy_identifier | (1<<9)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER,
+ (phy_identifier | (1<<9)));
return 0;
}
@@ -4946,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
switch (action) {
case DISABLE_TX:
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
break;
case ENABLE_TX:
if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4959,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
}
}
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+ u8 gpio_mode)
+{
+ struct bnx2x *bp = params->bp;
+
+ u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl)) &
+ PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+ switch (fault_led_gpio) {
+ case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+ return;
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+ case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+ {
+ u8 gpio_port = bnx2x_get_gpio_port(params);
+ u16 gpio_pin = fault_led_gpio -
+ PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+ DP(NETIF_MSG_LINK, "Set fault module-detected led "
+ "pin %x port %x mode %x\n",
+ gpio_pin, gpio_port, gpio_mode);
+ bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+ }
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+ fault_led_gpio);
+ }
+}
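
The fault LED can now sit on any of GPIO0..GPIO3, or be absent entirely, so callers only pass the desired level. A sketch matching the call sites updated below:

	/* Flag an unqualified module; this is a no-op when port_hw_config
	 * marks the fault LED as DISABLED. */
	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);

	/* ...and clear it again once a valid module is detected. */
	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
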
+
static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -4976,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
- } else if (bnx2x_verify_sfp_module(phy, params) !=
- 0) {
+ } else if (bnx2x_verify_sfp_module(phy, params) != 0) {
/* check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
rc = -EINVAL;
/* Turn on fault module-detected led */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params,
+ MISC_REGISTERS_GPIO_HIGH);
+
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4995,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
}
} else {
/* Turn off fault module-detected led */
- DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_LOW,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
}
/* power up the SFP module */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
bnx2x_8727_power_module(bp, phy, 1);
- /* Check and set limiting mode / LRM mode on 8726.
- On 8727 it is done automatically */
+ /*
+ * Check and set limiting mode / LRM mode on 8726. On 8727 it
+ * is done automatically
+ */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
else
@@ -5018,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
if (rc == 0 ||
(val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+ bnx2x_sfp_set_transmitter(params, phy, 1);
else
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
return rc;
}
@@ -5033,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
u8 port = params->port;
/* Set valid module led off */
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- params->port);
+ bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
- /* Get current gpio val refelecting module plugged in / out*/
+ /* Get current gpio val reflecting module plugged in/out */
gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
/* Call the handling function in case module is detected */
@@ -5053,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
} else {
u32 val = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_feature_config[params->port].
- config));
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ config));
bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
port);
- /* Module was plugged out. */
- /* Disable transmit for this module */
+ /*
+ * Module was plugged out.
+ * Disable transmit for this module
+ */
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
}
}
@@ -5100,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
" link_status 0x%x\n", rx_sd, pcs_status, val2);
- /* link is up if both bit 0 of pmd_rx_sd and
- * bit 0 of pcs_status are set, or if the autoneg bit
- * 1 is set
+ /*
+ * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+ * are set, or if the autoneg bit 1 is set
*/
link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
if (link_up) {
@@ -5123,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
- u16 cnt, val;
+ u32 tx_en_mode;
+ u16 cnt, val, tmp1;
struct bnx2x *bp = params->bp;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
/* Wait until fw is loaded */
for (cnt = 0; cnt < 100; cnt++) {
@@ -5197,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
0x0004);
}
bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+ /*
+ * If the TX laser is controlled by GPIO_0, do not let the PHY go
+ * into low power mode while the TX laser is disabled
+ */
+
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+ tmp1 |= 0x1;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+ }
+
return 0;
}
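
The hunk above is a plain read-modify-write of bit 0 in the PMA digital-control register. The same pattern, factored into a hedged helper (the helper name and scope are illustrative, not part of this patch):

	/* Hypothetical helper: OR bits into a CL45 register. */
	static void bnx2x_cl45_set_bits(struct bnx2x *bp, struct bnx2x_phy *phy,
					u8 devad, u16 reg, u16 bits)
	{
		u16 val;

		bnx2x_cl45_read(bp, phy, devad, reg, &val);
		bnx2x_cl45_write(bp, phy, devad, reg, val | bits);
	}

	/* Equivalent to the 8706 TXONOFF_PWRDN_DIS sequence above: */
	bnx2x_cl45_set_bits(bp, phy, MDIO_PMA_DEVAD,
			    MDIO_PMA_REG_DIGITAL_CTRL, 0x1);
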
@@ -5231,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
/* Set soft reset */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0001);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_GEN_CTRL,
- MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_GEN_CTRL,
+ MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
/* wait for 150ms for microcode load */
msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_MISC_CTRL1, 0x0000);
msleep(200);
bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5285,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
u32 val;
u32 swap_val, swap_override, aeu_gpio_mask, offset;
DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
- /* Restore normal power mode*/
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
-
- bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_8726_external_rom_boot(phy, params);
- /* Need to call module detected on initialization since
- the module detection triggered by actual module
- insertion might occur before driver is loaded, and when
- driver is loaded, it reset all registers, including the
- transmitter */
+ /*
+ * Need to call module detected on initialization since the module
+ * detection triggered by actual module insertion might occur before
+ * driver is loaded, and when driver is loaded, it reset all
+ * registers, including the transmitter
+ */
bnx2x_sfp_module_detection(phy, params);
if (phy->req_line_speed == SPEED_1000) {
@@ -5334,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
- /* Enable RX-ALARM control to receive
- interrupt for 1G speed change */
+ /*
+ * Enable RX-ALARM control to receive interrupt for 1G speed
+ * change
+ */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
bnx2x_cl45_write(bp, phy,
@@ -5367,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy,
/* Set GPIO3 to trigger SFP+ module insertion/removal */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
+ MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
/* The GPIO should be swapped if the swap register is set and active */
swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5458,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
struct link_params *params) {
u32 swap_val, swap_override;
u8 port;
- /**
+ /*
* The PHY reset is controlled by GPIO 1. Fake the port number
* to cancel the swap done in set_gpio()
*/
@@ -5467,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
port = (swap_val && swap_override) ^ 1;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
- u16 tmp1, val, mod_abs;
+ u32 tx_en_mode;
+ u16 tmp1, val, mod_abs, tmp2;
u16 rx_alarm_ctrl_val;
u16 lasi_ctrl_val;
struct bnx2x *bp = params->bp;
/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
lasi_ctrl_val = 0x0004;
@@ -5493,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
- /* Initially configure MOD_ABS to interrupt when
- module is presence( bit 8) */
+ /*
+ * Initially configure MOD_ABS to interrupt when a module is
+ * present (bit 8)
+ */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
- /* Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
+ /*
+ * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+ * When the EDC is off it locks onto a reference clock and avoids
+ * becoming 'lost'
+ */
mod_abs &= ~(1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs &= ~(1<<9);
@@ -5515,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
if (phy->flags & FLAGS_NOC)
val |= (3<<5);
- /**
+ /*
* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
* status which reflect SFP+ module over-current
*/
@@ -5542,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
- /**
+ /*
* Power down the XAUI until link is up in case of dual-media
* and 1G
*/
@@ -5568,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
} else {
- /**
+ /*
* Since the 8727 has only single reset pin, need to set the 10G
* registers although it is default
*/
@@ -5584,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
0x0008);
}
- /* Set 2-wire transfer rate of SFP+ module EEPROM
+ /*
+ * Set 2-wire transfer rate of SFP+ module EEPROM
+ * to 100kHz since some DACs (direct attached cables) do
+ * not work at 400kHz.
*/
@@ -5607,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
phy->tx_preemphasis[1]);
}
+ /*
+ * If the TX laser is controlled by GPIO_0, do not let the PHY go
+ * into low power mode while the TX laser is disabled
+ */
+ tx_en_mode = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].sfp_ctrl))
+ & PORT_HW_CFG_TX_LASER_MASK;
+
+ if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+ DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+ tmp2 |= 0x1000;
+ tmp2 &= 0xFFEF;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+ }
+
return 0;
}
@@ -5620,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
port_feature_config[params->port].
config));
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
if (mod_abs & (1<<8)) {
/* Module is absent */
DP(NETIF_MSG_LINK, "MOD_ABS indication "
"show module is absent\n");
- /* 1. Set mod_abs to detect next module
- presence event
- 2. Set EDC off by setting OPTXLOS signal input to low
- (bit 9).
- When the EDC is off it locks onto a reference clock and
- avoids becoming 'lost'.*/
+ /*
+ * 1. Set mod_abs to detect next module
+ * presence event
+ * 2. Set EDC off by setting OPTXLOS signal input to low
+ * (bit 9).
+ * When the EDC is off it locks onto a reference clock and
+ * avoids becoming 'lost'.
+ */
mod_abs &= ~(1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs &= ~(1<<9);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
- /* Clear RX alarm since it stays up as long as
- the mod_abs wasn't changed */
+ /*
+ * Clear RX alarm since it stays up as long as
+ * the mod_abs wasn't changed
+ */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
} else {
/* Module is present */
DP(NETIF_MSG_LINK, "MOD_ABS indication "
"show module is present\n");
- /* First thing, disable transmitter,
- and if the module is ok, the
- module_detection will enable it*/
-
- /* 1. Set mod_abs to detect next module
- absent event ( bit 8)
- 2. Restore the default polarity of the OPRXLOS signal and
- this signal will then correctly indicate the presence or
- absence of the Rx signal. (bit 9) */
+ /*
+ * First disable transmitter, and if the module is ok, the
+ * module_detection will enable it
+ * 1. Set mod_abs to detect the next module absent event (bit 8)
+ * 2. Restore the default polarity of the OPRXLOS signal and
+ * this signal will then correctly indicate the presence or
+ * absence of the Rx signal. (bit 9)
+ */
mod_abs |= (1<<8);
if (!(phy->flags & FLAGS_NOC))
mod_abs |= (1<<9);
@@ -5667,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
- /* Clear RX alarm since it stays up as long as
- the mod_abs wasn't changed. This is need to be done
- before calling the module detection, otherwise it will clear
- the link update alarm */
+ /*
+ * Clear RX alarm since it stays up as long as the mod_abs
+ * wasn't changed. This needs to be done before calling the
+ * module detection, otherwise it will clear the link update
+ * alarm
+ */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5678,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
bnx2x_sfp_module_detection(phy, params);
@@ -5687,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
}
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
- rx_alarm_status);
- /* No need to check link status in case of
- module plugged in/out */
+ rx_alarm_status);
+ /* No need to check link status in case of module plugged in/out */
}
static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5725,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
- /**
+ /*
* If a module is present and there is need to check
* for over current
*/
@@ -5745,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
" Please remove the SFP+ module and"
" restart the system to clear this"
" error.\n",
- params->port);
-
- /*
- * Disable all RX_ALARMs except for
- * mod_abs
- */
+ params->port);
+ /* Disable all RX_ALARMs except for mod_abs */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5793,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
- /* Bits 0..2 --> speed detected,
- bits 13..15--> link is down */
+ /*
+ * Bits 0..2 --> speed detected,
+ * Bits 13..15 --> link is down
+ */
if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
link_up = 1;
vars->line_speed = SPEED_10000;
+ DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+ params->port);
} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
link_up = 1;
vars->line_speed = SPEED_1000;
@@ -5819,7 +5928,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8727_PCS_GP, &val1);
- /**
+ /*
* In case of dual-media board and 1G, power up the XAUI side,
* otherwise power it down. For 10G it is done automatically
*/
@@ -5839,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
/* Disable Transmitter */
- bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+ bnx2x_sfp_set_transmitter(params, phy, 0);
/* Clear LASI */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
@@ -5851,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
struct link_params *params)
{
- u16 val, fw_ver1, fw_ver2, cnt;
+ u16 val, fw_ver1, fw_ver2, cnt, adj;
struct bnx2x *bp = params->bp;
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
+
/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
if (val & 1)
break;
udelay(5);
@@ -5877,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
if (val & 1)
break;
udelay(5);
@@ -5894,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
}
/* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
/* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
phy->ver_addr);
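The SPI-ROM version read above goes through the 848xx mailbox: write the 32-bit address into 0xA819/0xA81A, kick the command register 0xA817, poll the done bit in 0xA818, then fetch the two data halves from 0xA81B/0xA81C. A hedged sketch of the read path, with the MDIO accessors abstracted behind caller-supplied callbacks and the 84833 `adj` offset omitted for clarity:

#include <stdint.h>

/* Illustrative only: the 0xA817..0xA81C mailbox registers and the 0x000A
 * read command follow the sequence in the hunk above.
 */
struct mdio_ops {
        void     (*wr)(uint16_t reg, uint16_t val);
        uint16_t (*rd)(uint16_t reg);
};

int mdio2arm_read32(const struct mdio_ops *m, uint32_t addr, uint32_t *out)
{
        int cnt;

        m->wr(0xA819, addr & 0xffff);         /* address, low 16 bits  */
        m->wr(0xA81A, addr >> 16);            /* address, high 16 bits */
        m->wr(0xA817, 0x000A);                /* issue the read command */

        for (cnt = 0; cnt < 100; cnt++)       /* poll the done bit */
                if (m->rd(0xA818) & 1)
                        break;
        if (cnt == 100)
                return -1;                    /* firmware did not answer */

        *out = ((uint32_t)m->rd(0xA81C) << 16) | m->rd(0xA81B);
        return 0;
}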
@@ -5905,49 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val;
+ u16 val, adj;
+
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
val &= 0xFE00;
val |= 0x0092;
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+ MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
+ MDIO_PMA_REG_8481_LED1_MASK + adj,
0x80);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
+ MDIO_PMA_REG_8481_LED2_MASK + adj,
0x18);
/* Select activity source by Tx and Rx, as suggested by PHY AE */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
+ MDIO_PMA_REG_8481_LED3_MASK + adj,
0x0006);
/* Select the closest activity blink rate to that in 10/100/1000 */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_BLINK,
+ MDIO_PMA_REG_8481_LED3_BLINK + adj,
0);
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+ MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -5961,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
-
+ /*
+ * This phy uses the NIG latch mechanism since link indication
+ * arrives through its LED4 and not via its LASI signal, so we
+ * get a steady signal instead of a clear-on-read one
+ */
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
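The latch the new comment refers to behaves like a set-and-hold flip-flop rather than a clear-on-read status bit: once LED4 pulses, the NIG output stays asserted until software re-arms it. A toy model, purely illustrative:

#include <stdio.h>

struct latch {
        int armed;
        int out;
};

static void latch_pulse(struct latch *l)
{
        if (l->armed) {
                l->out = 1;      /* captured; stays set from now on */
                l->armed = 0;
        }
}

static void latch_rearm(struct latch *l)
{
        l->out = 0;              /* acknowledge and wait for the next pulse */
        l->armed = 1;
}

int main(void)
{
        struct latch l = { .armed = 1 };
        latch_pulse(&l);
        printf("out=%d\n", l.out);   /* 1 until rearmed */
        latch_rearm(&l);
        printf("out=%d\n", l.out);   /* 0 again */
        return 0;
}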
@@ -6086,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6102,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
- u16 val;
+ u16 val, adj;
u16 temp;
- u32 actual_phy_selection;
+ u32 actual_phy_selection, cms_enable;
u8 rc = 0;
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = 3;
msleep(1);
if (CHIP_IS_E2(bp))
@@ -6117,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
/* Wait for GPHY to come out of reset */
msleep(50);
- /* BCM84823 requires that XGXS links up first @ 10G for normal
- behavior */
+ /*
+ * BCM84823 requires that XGXS links up first @ 10G for normal behavior
+ */
temp = vars->line_speed;
vars->line_speed = SPEED_10000;
bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6131,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* Set dual-media configuration according to configuration */
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA, &val);
+ MDIO_CTL_REG_84823_MEDIA + adj, &val);
val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6164,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_CTL_REG_84823_MEDIA, val);
+ MDIO_CTL_REG_84823_MEDIA + adj, val);
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
@@ -6172,23 +6297,43 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
bnx2x_save_848xx_spirom_version(phy, params);
+ cms_enable = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_CMS_MASK;
+
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+ if (cms_enable)
+ val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ else
+ val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+
+
return rc;
}
static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+ struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val, val1, val2;
+ u16 val, val1, val2, adj;
u8 link_up = 0;
+ /* Reg offset adjustment for 84833 */
+ adj = 0;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ adj = -1;
+
/* Check 10G-BaseT link status */
/* Check PMD signal ok */
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, 0xFFFA, &val1);
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
&val2);
DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
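One subtlety in the `adj` scheme above: `adj` is a `u16` holding -1 (0xFFFF), and `reg + adj` relies on truncation back to the 16-bit register-address parameter to land on `reg - 1`. A quick standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t adj = -1;                 /* stored as 0xFFFF */
        uint16_t reg = 0xA811;             /* e.g. the 8481 PMD_SIGNAL reg */
        /* The sum promotes to int, but truncating back to the u16
         * register-address parameter yields reg - 1, which is how the
         * 84833's shifted register map is reached above.
         */
        uint16_t effective = (uint16_t)(reg + adj);

        printf("0x%04X\n", (unsigned)effective);   /* prints 0xA810 */
        return 0;
}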
@@ -6273,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
}
static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6297,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
else
port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
- MISC_REGISTERS_GPIO_OUTPUT_LOW,
- port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
}
static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6353,24 +6498,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x20);
} else {
bnx2x_cl45_write(bp, phy,
@@ -6394,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
val |= 0x2492;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LINK_SIGNAL,
- val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x0);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x20);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x0);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x0);
} else {
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x20);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x20);
}
break;
@@ -6440,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
&val);
if (!((val &
- MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
- >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
- DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
+ MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+ >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+ DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6451,24 +6596,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
/* Set LED masks */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED1_MASK,
- 0x10);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED1_MASK,
+ 0x10);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED2_MASK,
- 0x80);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x80);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED3_MASK,
- 0x98);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x98);
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8481_LED5_MASK,
- 0x40);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x40);
} else {
bnx2x_cl45_write(bp, phy,
@@ -6513,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy,
/* Restore normal power mode*/
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
- bnx2x_wait_reset_complete(bp, phy);
+ bnx2x_wait_reset_complete(bp, phy, params);
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6563,9 +6708,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
val2, val1);
link_up = ((val1 & 4) == 4);
- /* if link is up
- * print the AN outcome of the SFX7101 PHY
- */
+ /* if link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -6599,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
u16 val, cnt;
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
for (cnt = 0; cnt < 10; cnt++) {
msleep(50);
/* Writes a self-clearing reset */
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET,
- (val | (1<<15)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET,
+ (val | (1<<15)));
/* Wait for clear */
bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_7101_RESET, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_7101_RESET, &val);
if ((val & (1<<15)) == 0)
break;
@@ -6623,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
struct link_params *params) {
/* Low power mode is controlled by GPIO 2 */
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
/* The PHY reset is controlled by GPIO 1 */
bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
}
static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6668,9 +6811,9 @@ static struct bnx2x_phy phy_null = {
.supported = 0,
.media_type = ETH_PHY_NOT_PRESENT,
.ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)NULL,
@@ -6705,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = {
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_init_serdes,
@@ -6742,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = {
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_init_xgxs,
@@ -6773,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = {
.media_type = ETH_PHY_BASE_T,
.ver_addr = 0,
.req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_7101_config_init,
@@ -6804,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = {
SUPPORTED_Asym_Pause),
.media_type = ETH_PHY_UNSPECIFIED,
.ver_addr = 0,
- .req_flow_ctrl = 0,
- .req_line_speed = 0,
- .speed_cap_mask = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
.config_init = (config_init_t)bnx2x_8073_config_init,
@@ -7015,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = {
.phy_specific_func = (phy_specific_func_t)NULL
};
+static struct bnx2x_phy phy_84833 = {
+ .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+ .addr = 0xff,
+ .flags = FLAGS_FAN_FAILURE_DET_REQ |
+ FLAGS_REARM_LATCH_SIGNAL,
+ .def_md_devad = 0,
+ .reserved = 0,
+ .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+ .mdio_ctrl = 0,
+ .supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .media_type = ETH_PHY_BASE_T,
+ .ver_addr = 0,
+ .req_flow_ctrl = 0,
+ .req_line_speed = 0,
+ .speed_cap_mask = 0,
+ .req_duplex = 0,
+ .rsrv = 0,
+ .config_init = (config_init_t)bnx2x_848x3_config_init,
+ .read_status = (read_status_t)bnx2x_848xx_read_status,
+ .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
+ .config_loopback = (config_loopback_t)NULL,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .hw_reset = (hw_reset_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
+ .phy_specific_func = (phy_specific_func_t)NULL
+};
+
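Like the other descriptors in this table, `phy_84833` is consumed by copying it into `link_params` and invoking it only through its function pointers, so the core link code never switches on the PHY type. A minimal standalone sketch of that ops-table dispatch pattern (names are illustrative):

#include <stdio.h>

struct phy;
typedef int (*config_init_t)(struct phy *p);

struct phy {
        const char   *name;
        config_init_t config_init;
};

static int init_84833(struct phy *p)
{
        printf("init %s\n", p->name);
        return 0;
}

static struct phy phy_descr = {
        .name        = "BCM84833",
        .config_init = init_84833,
};

int main(void)
{
        struct phy p = phy_descr;          /* populate_ext_phy-style copy */
        return p.config_init(&p);          /* generic dispatch */
}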
/*****************************************************************/
/* */
/* Populate the phy accordingly. Main function: bnx2x_populate_phy */
@@ -7028,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
/* Get the 4 lanes xgxs config rx and tx */
u32 rx = 0, tx = 0, i;
for (i = 0; i < 2; i++) {
- /**
+ /*
* INT_PHY and EXT_PHY1 share the same value location in the
* shmem. When num_phys is greater than 1, then this value
* applies only to EXT_PHY1
@@ -7036,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
rx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
tx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
} else {
rx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
tx = REG_RD(bp, shmem_base +
offsetof(struct shmem_region,
- dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+ dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
}
phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7168,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
*phy = phy_84823;
break;
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ *phy = phy_84833;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
*phy = phy_7101;
break;
@@ -7182,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
- /**
- * The shmem address of the phy version is located on different
- * structures. In case this structure is too old, do not set
- * the address
- */
+ /*
+ * The shmem address of the phy version is located on different
+ * structures. In case this structure is too old, do not set
+ * the address
+ */
config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
dev_info.shared_hw_config.config2));
if (phy_index == EXT_PHY1) {
phy->ver_addr = shmem_base + offsetof(struct shmem_region,
port_mb[port].ext_phy_fw_version);
- /* Check specific mdc mdio settings */
- if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
- mdc_mdio_access = config2 &
- SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+ /* Check specific mdc mdio settings */
+ if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+ mdc_mdio_access = config2 &
+ SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
} else {
u32 size = REG_RD(bp, shmem2_base);
@@ -7215,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
- /**
+ /*
* In case mdc/mdio_access of the external phy is different than the
* mdc/mdio access of the XGXS, a HW lock must be taken in each access
* to prevent one port interfere with another port's CL45 operations.
@@ -7250,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params,
/* Populate the default phy configuration for MF mode */
if (phy_index == EXT_PHY2) {
link_config = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].link_config2));
phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region,
+ dev_info.
port_hw_config[params->port].speed_capability_mask2));
} else {
link_config = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
+ offsetof(struct shmem_region, dev_info.
port_feature_config[params->port].link_config));
phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region, dev_info.
- port_hw_config[params->port].speed_capability_mask));
+ offsetof(struct shmem_region,
+ dev_info.
+ port_hw_config[params->port].speed_capability_mask));
}
DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
" 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7408,7 +7593,7 @@ static void set_phy_vars(struct link_params *params)
else if (phy_index == EXT_PHY2)
actual_phy_idx = EXT_PHY1;
}
- params->phy[actual_phy_idx].req_flow_ctrl =
+ params->phy[actual_phy_idx].req_flow_ctrl =
params->req_flow_ctrl[link_cfg_idx];
params->phy[actual_phy_idx].req_line_speed =
@@ -7461,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
set_phy_vars(params);
DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
- if (CHIP_REV_IS_FPGA(bp)) {
-
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
- /* enable on E1.5 FPGA */
- if (CHIP_IS_E1H(bp)) {
- vars->flow_ctrl |=
- (BNX2X_FLOW_CTRL_TX |
- BNX2X_FLOW_CTRL_RX);
- vars->link_status |=
- (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
- LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
- }
-
- bnx2x_emac_enable(params, vars, 0);
- if (!(CHIP_IS_E2(bp)))
- bnx2x_pbf_update(params, vars->flow_ctrl,
- vars->line_speed);
- /* disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
-
- return 0;
-
- } else
- if (CHIP_REV_IS_EMUL(bp)) {
-
- vars->link_up = 1;
- vars->line_speed = SPEED_10000;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-
- bnx2x_bmac_enable(params, vars, 0);
-
- bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
- /* Disable drain */
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
- + params->port*4, 0);
-
- /* update shared memory */
- bnx2x_update_mng(params, vars->link_status);
-
- return 0;
-
- } else
if (params->loopback_mode == LOOPBACK_BMAC) {
vars->link_up = 1;
@@ -7527,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
/* set bmac loopback */
bnx2x_bmac_enable(params, vars, 1);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
} else if (params->loopback_mode == LOOPBACK_EMAC) {
@@ -7544,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
/* set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
} else if ((params->loopback_mode == LOOPBACK_XGXS) ||
(params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7568,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
bnx2x_emac_program(params, vars);
bnx2x_emac_enable(params, vars, 0);
} else
- bnx2x_bmac_enable(params, vars, 0);
-
+ bnx2x_bmac_enable(params, vars, 0);
if (params->loopback_mode == LOOPBACK_XGXS) {
/* set 10G XGXS loopback */
params->phy[INT_PHY].config_loopback(
@@ -7587,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params);
}
}
-
- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
- params->port*4, 0);
+ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
@@ -7608,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
return 0;
}
u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
- u8 reset_ext_phy)
+ u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7617,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
vars->link_status = 0;
bnx2x_update_mng(params, vars->link_status);
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
/* activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7719,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
- (NIG_MASK_XGXS0_LINK_STATUS |
- NIG_MASK_XGXS0_LINK10G |
- NIG_MASK_SERDES0_LINK_STATUS |
- NIG_MASK_MI_INT));
+ (NIG_MASK_XGXS0_LINK_STATUS |
+ NIG_MASK_XGXS0_LINK10G |
+ NIG_MASK_SERDES0_LINK_STATUS |
+ NIG_MASK_MI_INT));
/* Need to take the phy out of low power mode in order
to access its registers */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+ MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
/* Reset the phy */
bnx2x_cl45_write(bp, &phy[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL,
+ 1<<15);
}
/* Add delay of 150ms after reset */
@@ -7762,18 +7892,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* Only set bit 10 = 1 (Tx power down) */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
/* Phase1 of TX_POWER_DOWN reset */
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN,
- (val | 1<<10));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN,
+ (val | 1<<10));
}
- /* Toggle Transmitter: Power down and then up with 600ms
- delay between */
+ /*
+ * Toggle Transmitter: Power down and then up with 600ms delay
+ * between
+ */
msleep(600);
/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7781,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
/* Phase2 of POWER_DOWN_RESET */
/* Release bit 10 (Release Tx power down) */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, &val);
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
msleep(15);
/* Read modify write the SPI-ROM version select register */
bnx2x_cl45_read(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, &val);
bnx2x_cl45_write(bp, phy_blk[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
/* set GPIO2 back to LOW */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
- MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+ MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
return 0;
}
@@ -7846,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
/* Set fault module detected LED on */
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
- MISC_REGISTERS_GPIO_HIGH,
- port);
+ MISC_REGISTERS_GPIO_HIGH,
+ port);
}
return 0;
}
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+ u8 *io_gpio, u8 *io_port)
+{
+
+ u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[PORT_0].default_cfg));
+ switch (phy_gpio_reset) {
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+ *io_gpio = 0;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+ *io_gpio = 1;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+ *io_gpio = 2;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+ *io_gpio = 3;
+ *io_port = 0;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+ *io_gpio = 0;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+ *io_gpio = 1;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+ *io_gpio = 2;
+ *io_port = 1;
+ break;
+ case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+ *io_gpio = 3;
+ *io_port = 1;
+ break;
+ default:
+ /* Don't override the io_gpio and io_port */
+ break;
+ }
+}
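Note the contract of the helper above: on an unrecognized `default_cfg` value it leaves both out-parameters alone, which is why the caller below seeds GPIO1/PORT1 before calling it. A small standalone illustration of that default-preserving pattern (the config values are placeholders):

#include <stdint.h>
#include <stdio.h>

static void get_reset_gpio(uint32_t cfg, uint8_t *io_gpio, uint8_t *io_port)
{
        switch (cfg) {
        case 1: *io_gpio = 0; *io_port = 0; break;
        case 2: *io_gpio = 2; *io_port = 1; break;
        default: break;                    /* keep the caller's defaults */
        }
}

int main(void)
{
        uint8_t gpio = 1, port = 1;        /* defaults seeded first */

        get_reset_gpio(0 /* unknown cfg */, &gpio, &port);
        printf("gpio=%u port=%u\n", (unsigned)gpio, (unsigned)port);
        return 0;                          /* still 1 / 1 */
}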
static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 chip_id)
{
- s8 port;
+ s8 port, reset_gpio;
u32 swap_val, swap_override;
struct bnx2x_phy phy[PORT_MAX];
struct bnx2x_phy *phy_blk[PORT_MAX];
s8 port_of_path;
- swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
- swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+ swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+ reset_gpio = MISC_REGISTERS_GPIO_1;
port = 1;
- bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+ /*
+ * Retrieve the reset gpio/port that controls the reset.
+ * Default is GPIO1, PORT1
+ */
+ bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+ (u8 *)&reset_gpio, (u8 *)&port);
/* Calculate the port based on port swap */
port ^= (swap_val && swap_override);
+ /* Initiate PHY reset*/
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+ port);
+ msleep(1);
+ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+ port);
+
msleep(5);
/* PART1 - Reset both phys */
@@ -7907,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Reset the phy */
bnx2x_cl45_write(bp, &phy[port],
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL,
- 1<<15);
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
}
/* Add delay of 150ms after reset */
@@ -7923,7 +8111,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
}
/* PART2 - Download firmware to both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
- if (CHIP_IS_E2(bp))
+ if (CHIP_IS_E2(bp))
port_of_path = 0;
else
port_of_path = port;
@@ -7958,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- /* GPIO1 affects both ports, so there's need to pull
- it for single port alone */
+ /*
+ * GPIO1 affects both ports, so there's a need to pull
+ * it for a single port alone
+ */
rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
shmem2_base_path,
phy_index, chip_id);
@@ -7969,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
default:
DP(NETIF_MSG_LINK,
- "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
- ext_phy_type);
+ "ext_phy 0x%x common init not required\n",
+ ext_phy_type);
break;
}
+ if (rc != 0)
+ netdev_err(bp->dev, "Warning: PHY was not initialized,"
+ " Port %d\n",
+ 0);
return rc;
}
@@ -7986,9 +8180,6 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
u32 ext_phy_type, ext_phy_config;
DP(NETIF_MSG_LINK, "Begin common phy init\n");
- if (CHIP_REV_IS_EMUL(bp))
- return 0;
-
/* Check if common init was already done */
phy_ver = REG_RD(bp, shmem_base_path[0] +
offsetof(struct shmem_region,
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index bedab1a..92f36b6 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2010 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
-#define SPEED_AUTO_NEG 0
+#define SPEED_AUTO_NEG 0
#define SPEED_12000 12000
#define SPEED_12500 12500
#define SPEED_13000 13000
@@ -44,8 +44,8 @@
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
#define SFP_EEPROM_VENDOR_OUI_SIZE 3
-#define SFP_EEPROM_PART_NO_ADDR 0x28
-#define SFP_EEPROM_PART_NO_SIZE 16
+#define SFP_EEPROM_PART_NO_ADDR 0x28
+#define SFP_EEPROM_PART_NO_SIZE 16
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
#define SINGLE_MEDIA(params) (params->num_phys == 2)
/* Dual Media board contains two external phy with different media */
#define DUAL_MEDIA(params) (params->num_phys == 3)
-#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_MDIO_CTRL_OFFSET 16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
@@ -201,12 +201,14 @@ struct link_params {
/* Default / User Configuration */
u8 loopback_mode;
-#define LOOPBACK_NONE 0
-#define LOOPBACK_EMAC 1
-#define LOOPBACK_BMAC 2
+#define LOOPBACK_NONE 0
+#define LOOPBACK_EMAC 1
+#define LOOPBACK_BMAC 2
#define LOOPBACK_XGXS 3
#define LOOPBACK_EXT_PHY 4
-#define LOOPBACK_EXT 5
+#define LOOPBACK_EXT 5
+#define LOOPBACK_UMAC 6
+#define LOOPBACK_XMAC 7
/* Device parameters */
u8 mac_addr[6];
@@ -230,10 +232,11 @@ struct link_params {
/* Phy register parameter */
u32 chip_id;
+ /* features */
u32 feature_config_flags;
-#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
-#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
-#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
/* Will be populated during common init */
struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf);
+
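A hypothetical caller sketch for the newly exported routine, reading the module part number with the `SFP_EEPROM_PART_NO_*` constants defined earlier in this header; the buffer handling and error check are illustrative only:

static void print_sfp_part_no(struct bnx2x_phy *phy,
                              struct link_params *params)
{
        u8 part_no[SFP_EEPROM_PART_NO_SIZE + 1] = { 0 };  /* NUL-terminated */

        if (bnx2x_read_sfp_module_eeprom(phy, params,
                                         SFP_EEPROM_PART_NO_ADDR,
                                         SFP_EEPROM_PART_NO_SIZE,
                                         part_no) == 0)
                DP(NETIF_MSG_LINK, "SFP+ part number: %s\n", part_no);
}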
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params);
/* Used to configure the ETS to BW limited */
void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
- const u32 cos1_bw);
+ const u32 cos1_bw);
/* Used to configure the ETS to strict */
u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index d584d32..6c7745e 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
/* lock the dmae channel */
- mutex_lock(&bp->dmae_mutex);
+ spin_lock_bh(&bp->dmae_lock);
/* reset completion */
*wb_comp = 0;
@@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
unlock:
- mutex_unlock(&bp->dmae_mutex);
+ spin_unlock_bh(&bp->dmae_lock);
return rc;
}
@@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp,
}
smp_mb__before_atomic_inc();
- atomic_inc(&bp->spq_left);
+ atomic_inc(&bp->cq_spq_left);
/* push the change in fp->state and towards the memory */
smp_wmb();
@@ -2473,8 +2473,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
rxq_init->sge_map = fp->rx_sge_mapping;
rxq_init->rcq_map = fp->rx_comp_mapping;
rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
- rxq_init->mtu = bp->dev->mtu;
- rxq_init->buf_sz = bp->rx_buf_size;
+
+ /* Always use mini-jumbo MTU for FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+ else
+ rxq_init->mtu = bp->dev->mtu;
+
+ rxq_init->buf_sz = fp->rx_buf_size;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
rxq_init->cl_id = fp->cl_id;
rxq_init->spcl_id = fp->cl_id;
@@ -2726,11 +2732,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spin_lock_bh(&bp->spq_lock);
- if (!atomic_read(&bp->spq_left)) {
- BNX2X_ERR("BUG! SPQ ring full!\n");
- spin_unlock_bh(&bp->spq_lock);
- bnx2x_panic();
- return -EBUSY;
+ if (common) {
+ if (!atomic_read(&bp->eq_spq_left)) {
+ BNX2X_ERR("BUG! EQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
+ }
+ } else if (!atomic_read(&bp->cq_spq_left)) {
+ BNX2X_ERR("BUG! SPQ ring full!\n");
+ spin_unlock_bh(&bp->spq_lock);
+ bnx2x_panic();
+ return -EBUSY;
}
spe = bnx2x_sp_get_next(bp);
@@ -2761,20 +2774,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
/* stats ramrod has it's own slot on the spq */
- if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+ if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
/* It's ok if the actual decrement is issued towards the memory
* somewhere between the spin_lock and spin_unlock. Thus no
* more explicit memory barrier is needed.
*/
- atomic_dec(&bp->spq_left);
+ if (common)
+ atomic_dec(&bp->eq_spq_left);
+ else
+ atomic_dec(&bp->cq_spq_left);
+ }
+
DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
"SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
- "type(0x%x) left %x\n",
+ "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
(u32)(U64_LO(bp->spq_mapping) +
(void *)bp->spq_prod_bd - (void *)bp->spq), command,
- HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
+ HW_CID(bp, cid), data_hi, data_lo, type,
+ atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
bnx2x_sp_prod_update(bp);
spin_unlock_bh(&bp->spq_lock);
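The split above replaces the single `spq_left` credit pool with one pool per ring, so a burst of COMMON ramrods can no longer exhaust the credits ETH ramrods depend on, and vice versa. A userspace model of the accounting, with plain ints standing in for the driver's atomics under `spq_lock`:

#include <stdio.h>

struct spq {
        int cq_spq_left;     /* credits for ETH ramrods (CQ) */
        int eq_spq_left;     /* credits for COMMON ramrods (EQ) */
};

static int sp_post(struct spq *q, int common)
{
        int *left = common ? &q->eq_spq_left : &q->cq_spq_left;

        if (*left == 0)
                return -1;   /* ring full: caller must back off */
        (*left)--;           /* reserve a slot, returned on completion */
        return 0;
}

int main(void)
{
        struct spq q = { .cq_spq_left = 8, .eq_spq_left = 1 };
        int a = sp_post(&q, 1);            /* consumes the last EQ credit */
        int b = sp_post(&q, 1);            /* refused: EQ pool exhausted */

        printf("%d %d cq=%d\n", a, b, q.cq_spq_left);  /* 0 -1 cq=8 */
        return 0;
}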
@@ -3686,8 +3705,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
sw_cons = bp->eq_cons;
sw_prod = bp->eq_prod;
- DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
- hw_cons, sw_cons, atomic_read(&bp->spq_left));
+ DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
+ hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
for (; sw_cons != hw_cons;
sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3752,13 +3771,15 @@ static void bnx2x_eq_int(struct bnx2x *bp)
case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
- bp->set_mac_pending = 0;
+ if (elem->message.data.set_mac_event.echo)
+ bp->set_mac_pending = 0;
break;
case (EVENT_RING_OPCODE_SET_MAC |
BNX2X_STATE_CLOSING_WAIT4_HALT):
DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
- bp->set_mac_pending = 0;
+ if (elem->message.data.set_mac_event.echo)
+ bp->set_mac_pending = 0;
break;
default:
/* unknown event log error and continue */
@@ -3770,7 +3791,7 @@ next_spqe:
} /* for */
smp_mb__before_atomic_inc();
- atomic_add(spqe_cnt, &bp->spq_left);
+ atomic_add(spqe_cnt, &bp->eq_spq_left);
bp->eq_cons = sw_cons;
bp->eq_prod = sw_prod;
@@ -4203,7 +4224,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
spin_lock_init(&bp->spq_lock);
- atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
+ atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
bp->spq_prod_idx = 0;
bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4228,9 +4249,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
bp->eq_cons = 0;
bp->eq_prod = NUM_EQ_DESC;
bp->eq_cons_sb = BNX2X_EQ_INDEX;
+ /* we want a warning message before it gets rough... */
+ atomic_set(&bp->eq_spq_left,
+ min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
-static void bnx2x_init_ind_table(struct bnx2x *bp)
+void bnx2x_push_indir_table(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
int i;
@@ -4238,13 +4262,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
return;
- DP(NETIF_MSG_IFUP,
- "Initializing indirection table multi_mode %d\n", bp->multi_mode);
for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
REG_WR8(bp, BAR_TSTRORM_INTMEM +
TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
- bp->fp->cl_id + (i % (bp->num_queues -
- NONE_ETH_CONTEXT_USE)));
+ bp->fp->cl_id + bp->rx_indir_table[i]);
+}
+
+static void bnx2x_init_ind_table(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+ bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
+
+ bnx2x_push_indir_table(bp);
}
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
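The refactoring above caches the RSS spread in `bp->rx_indir_table` so it can be rewritten later (e.g. by ethtool) and re-pushed wholesale; the default fill is a plain round-robin over the ETH queues. A standalone sketch of the fill (the table size is a stand-in):

#include <stdio.h>

#define TABLE_SIZE 128      /* stands in for TSTORM_INDIRECTION_TABLE_SIZE */

int main(void)
{
        unsigned char indir[TABLE_SIZE];
        int num_eth_queues = 4, i;

        for (i = 0; i < TABLE_SIZE; i++)
                indir[i] = i % num_eth_queues;   /* even round-robin spread */

        /* "push": each entry becomes cl_id + indir[i] in hardware */
        printf("entry 0 -> q%d, entry 5 -> q%d\n", indir[0], indir[5]);
        return 0;
}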
@@ -5840,7 +5871,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
BP_ABS_FUNC(bp), load_code);
bp->dmae_ready = 0;
- mutex_init(&bp->dmae_mutex);
+ spin_lock_init(&bp->dmae_lock);
rc = bnx2x_gunzip_init(bp);
if (rc)
return rc;
@@ -5992,6 +6023,8 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ BNX2X_FREE(bp->rx_indir_table);
+
#undef BNX2X_PCI_FREE
#undef BNX2X_KFREE
}
@@ -6122,6 +6155,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
/* EQ */
BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+ BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
+ TSTORM_INDIRECTION_TABLE_SIZE);
return 0;
alloc_mem_err:
@@ -6175,12 +6211,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
int ramrod_flags = WAIT_RAMROD_COMMON;
bp->set_mac_pending = 1;
- smp_wmb();
config->hdr.length = 1;
config->hdr.offset = cam_offset;
config->hdr.client_id = 0xff;
- config->hdr.reserved1 = 0;
+ /* Mark the single MAC configuration ramrod (as opposed to a
+ * UC/MC list configuration).
+ */
+ config->hdr.echo = 1;
/* primary MAC */
config->config_table[0].msb_mac_addr =
@@ -6212,6 +6250,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
config->config_table[0].middle_mac_addr,
config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
+ mb();
+
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(bnx2x_sp_mapping(bp, mac_config)),
U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6276,20 +6316,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
else if (CHIP_MODE_IS_4_PORT(bp))
- return BP_FUNC(bp) * 32 + rel_offset;
+ return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
else
- return BP_VN(bp) * 32 + rel_offset;
+ return E2_FUNC_MAX * rel_offset + BP_VN(bp);
}
/**
* LLH CAM line allocations: currently only iSCSI and ETH macs are
* relevant. In addition, current implementation is tuned for a
* single ETH MAC.
- *
- * When multiple unicast ETH MACs PF configuration in switch
- * independent mode is required (NetQ, multiple netdev MACs,
- * etc.), consider better utilisation of 16 per function MAC
- * entries in the LLH memory.
*/
enum {
LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6364,14 +6399,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
}
}
-static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
+
+static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
+{
+ return CHIP_REV_IS_SLOW(bp) ?
+ (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
+ (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
+}
+
+/* set mc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
int i = 0, old;
struct net_device *dev = bp->dev;
+ u8 offset = bnx2x_e1_cam_mc_offset(bp);
struct netdev_hw_addr *ha;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
+ if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
+ return -EINVAL;
+
netdev_for_each_mc_addr(ha, dev) {
/* copy mac */
config_cmd->config_table[i].msb_mac_addr =
@@ -6412,32 +6470,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
}
}
+ wmb();
+
config_cmd->hdr.length = i;
config_cmd->hdr.offset = offset;
config_cmd->hdr.client_id = 0xff;
- config_cmd->hdr.reserved1 = 0;
+ /* Mark that this ramrod doesn't use bp->set_mac_pending for
+ * synchronization.
+ */
+ config_cmd->hdr.echo = 0;
- bp->set_mac_pending = 1;
- smp_wmb();
+ mb();
- bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
-static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
+
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
int i;
struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
int ramrod_flags = WAIT_RAMROD_COMMON;
+ u8 offset = bnx2x_e1_cam_mc_offset(bp);
- bp->set_mac_pending = 1;
- smp_wmb();
-
- for (i = 0; i < config_cmd->hdr.length; i++)
+ for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
SET_FLAG(config_cmd->config_table[i].flags,
MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
T_ETH_MAC_COMMAND_INVALIDATE);
+ wmb();
+
+ config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ /* We'll wait for a completion this time... */
+ config_cmd->hdr.echo = 1;
+
+ bp->set_mac_pending = 1;
+
+ mb();
+
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
@@ -6447,6 +6520,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
}
+/* Accept one or more multicasts */
+static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+{
+ struct net_device *dev = bp->dev;
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[MC_HASH_SIZE];
+ u32 crc, bit, regidx;
+ int i;
+
+ memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+ bnx2x_mc_addr(ha));
+
+ crc = crc32c_le(0, bnx2x_mc_addr(ha),
+ ETH_ALEN);
+ bit = (crc >> 24) & 0xff;
+ regidx = bit >> 5;
+ bit &= 0x1f;
+ mc_filter[regidx] |= (1 << bit);
+ }
+
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i),
+ mc_filter[i]);
+
+ return 0;
+}
+
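The hash arithmetic in `bnx2x_set_e1h_mc_list()` maps each multicast MAC into a 256-bin filter spread over eight 32-bit registers: the CRC's top byte picks the bin, `regidx` the register, and `bit` the position inside it. A standalone rehearsal of the bin selection, with a stand-in mixer where the kernel uses `crc32c_le()`:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for crc32c_le(): any 32-bit mix demonstrates the bin
 * selection; the kernel of course uses the real CRC32C.
 */
static uint32_t fake_crc32c(const uint8_t *buf, int len)
{
        uint32_t h = 0;

        while (len--)
                h = h * 31 + *buf++;
        return h;
}

int main(void)
{
        uint32_t mc_filter[8];
        const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        uint32_t crc, bit, regidx;

        memset(mc_filter, 0, sizeof(mc_filter));
        crc = fake_crc32c(mac, 6);
        bit = (crc >> 24) & 0xff;          /* bin 0..255 */
        regidx = bit >> 5;                 /* which of 8 registers */
        bit &= 0x1f;                       /* bit within the register */
        mc_filter[regidx] |= 1u << bit;
        printf("reg %u bit %u\n", regidx, bit);
        return 0;
}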
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
+{
+ int i;
+
+ for (i = 0; i < MC_HASH_SIZE; i++)
+ REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+}
+
#ifdef BCM_CNIC
/**
* Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@@ -6465,12 +6576,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
/* Send a SET_MAC ramrod */
- bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
+ bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
cam_offset, 0);
- bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+ bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
return 0;
}
@@ -7112,20 +7224,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
/* Give HW time to discard old tx messages */
msleep(1);
- if (CHIP_IS_E1(bp)) {
- /* invalidate mc list,
- * wait and poll (interrupts are off)
- */
- bnx2x_invlidate_e1_mc_list(bp);
- bnx2x_set_eth_mac(bp, 0);
-
- } else {
- REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+ bnx2x_set_eth_mac(bp, 0);
- bnx2x_set_eth_mac(bp, 0);
+ bnx2x_invalidate_uc_list(bp);
- for (i = 0; i < MC_HASH_SIZE; i++)
- REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+ if (CHIP_IS_E1(bp))
+ bnx2x_invalidate_e1_mc_list(bp);
+ else {
+ bnx2x_invalidate_e1h_mc_list(bp);
+ REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
}
#ifdef BCM_CNIC
@@ -8394,11 +8501,47 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->common.shmem2_base);
}
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
+
+ /* Get the number of maximum allowed iSCSI and FCoE connections */
+ bp->cnic_eth_dev.max_iscsi_conn =
+ (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+ BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+ bp->cnic_eth_dev.max_fcoe_conn =
+ (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+ BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn,
+ bp->cnic_eth_dev.max_fcoe_conn);
+
+ /* If the maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (!bp->cnic_eth_dev.max_fcoe_conn)
+ bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
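The license decode above XORs the shmem word with `FW_ENCODE_32BIT_PATTERN` before applying mask and shift. A sketch of the decode with placeholder pattern/mask/shift values (the real constants live in the firmware headers):

#include <stdint.h>
#include <stdio.h>

#define FW_PATTERN   0x1e1e1e1e     /* placeholder */
#define CONN_MASK    0x0000ffff     /* placeholder */
#define CONN_SHIFT   0              /* placeholder */

static uint32_t decode_max_conn(uint32_t shmem_word)
{
        return ((shmem_word ^ FW_PATTERN) & CONN_MASK) >> CONN_SHIFT;
}

int main(void)
{
        uint32_t raw = 0x1e1e1e1e ^ 16;        /* encodes "16 connections" */

        printf("max_conn=%u\n", decode_max_conn(raw));
        return 0;
}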
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+ u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+ u8 *fip_mac = bp->fip_mac;
+#endif
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -8411,7 +8554,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
#ifdef BCM_CNIC
- /* iSCSI NPAR MAC */
+ /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
+ * FCoE MAC is absent, the appropriate feature should be disabled.
+ */
if (IS_MF_SI(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8419,8 +8564,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
iscsi_mac_addr_upper);
val = MF_CFG_RD(bp, func_ext_config[func].
iscsi_mac_addr_lower);
- bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
- }
+ BNX2X_DEV_INFO("Read iSCSI MAC: "
+ "0x%x:0x%04x\n", val2, val);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+ /* Disable iSCSI OOO if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(iscsi_mac)) {
+ bp->flags |= NO_ISCSI_OOO_FLAG |
+ NO_ISCSI_FLAG;
+ memset(iscsi_mac, 0, ETH_ALEN);
+ }
+ } else
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+ val2 = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_upper);
+ val = MF_CFG_RD(bp, func_ext_config[func].
+ fcoe_mac_addr_lower);
+ BNX2X_DEV_INFO("Read FCoE MAC to "
+ "0x%x:0x%04x\n", val2, val);
+ bnx2x_set_mac_buf(fip_mac, val, val2);
+
+ /* Disable FCoE if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(fip_mac)) {
+ bp->flags |= NO_FCOE_FLAG;
+ memset(bp->fip_mac, 0, ETH_ALEN);
+ }
+ } else
+ bp->flags |= NO_FCOE_FLAG;
}
#endif
} else {
@@ -8434,7 +8610,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
iscsi_mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_lower);
- bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+ bnx2x_set_mac_buf(iscsi_mac, val, val2);
#endif
}
@@ -8442,14 +8618,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
- /* Inform the upper layers about FCoE MAC */
+ /* Set the FCoE MAC in modes other than MF_SI */
if (!CHIP_IS_E1x(bp)) {
if (IS_MF_SD(bp))
- memcpy(bp->fip_mac, bp->dev->dev_addr,
- sizeof(bp->fip_mac));
- else
- memcpy(bp->fip_mac, bp->iscsi_mac,
- sizeof(bp->fip_mac));
+ memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+ else if (!IS_MF(bp))
+ memcpy(fip_mac, iscsi_mac, ETH_ALEN);
}
#endif
}
@@ -8612,6 +8786,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
+#ifdef BCM_CNIC
+ bnx2x_get_cnic_info(bp);
+#endif
+
return rc;
}
@@ -8826,12 +9004,197 @@ static int bnx2x_close(struct net_device *dev)
return 0;
}
+#define E1_MAX_UC_LIST 29
+#define E1H_MAX_UC_LIST 30
+#define E2_MAX_UC_LIST 14
+static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+{
+ if (CHIP_IS_E1(bp))
+ return E1_MAX_UC_LIST;
+ else if (CHIP_IS_E1H(bp))
+ return E1H_MAX_UC_LIST;
+ else
+ return E2_MAX_UC_LIST;
+}
+
+
+static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
+{
+ if (CHIP_IS_E1(bp))
+ /* CAM Entries for Port0:
+ * 0 - prim ETH MAC
+ * 1 - BCAST MAC
+ * 2 - iSCSI L2 ring ETH MAC
+ * 3-31 - UC MACs
+ *
+ * Port1 entries are allocated the same way starting from
+ * entry 32.
+ */
+ return 3 + 32 * BP_PORT(bp);
+ else if (CHIP_IS_E1H(bp)) {
+ /* CAM Entries:
+ * 0-7 - prim ETH MAC for each function
+ * 8-15 - iSCSI L2 ring ETH MAC for each function
+ * 16 to 255 - UC MAC lists for each function
+ *
+ * Remark: There is no FCoE support for E1H, thus FCoE related
+ * MACs are not considered.
+ */
+ return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
+ bnx2x_max_uc_list(bp) * BP_FUNC(bp);
+ } else {
+ /* CAM Entries (there is a separate CAM per engine):
+ * 0-3 - prim ETH MAC for each function
+ * 4-7 - iSCSI L2 ring ETH MAC for each function
+ * 8-11 - FIP ucast L2 MAC for each function
+ * 12-15 - ALL_ENODE_MACS mcast MAC for each function
+ * 16 to 71 - UC MAC lists for each function
+ */
+ u8 func_idx =
+ (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+
+ return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
+ bnx2x_max_uc_list(bp) * func_idx;
+ }
+}
+
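The E1 layout documented above is easy to sanity-check: three fixed entries per port (prim ETH, BCAST, iSCSI), UC MACs from entry 3, port 1 shifted by 32. A standalone check of just that case, mirroring `bnx2x_uc_list_cam_offset()` for E1:

#include <stdio.h>

static int e1_uc_cam_offset(int port)
{
        return 3 + 32 * port;      /* entries 0-2 are fixed per port */
}

int main(void)
{
        printf("port0 UC base=%d, port1 UC base=%d\n",
               e1_uc_cam_offset(0), e1_uc_cam_offset(1)); /* 3 and 35 */
        return 0;
}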
+/* set uc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+ int i = 0, old;
+ struct net_device *dev = bp->dev;
+ u8 offset = bnx2x_uc_list_cam_offset(bp);
+ struct netdev_hw_addr *ha;
+ struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+ dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+
+ if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
+ return -EINVAL;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ /* copy mac */
+ config_cmd->config_table[i].msb_mac_addr =
+ swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
+ config_cmd->config_table[i].middle_mac_addr =
+ swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
+ config_cmd->config_table[i].lsb_mac_addr =
+ swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
+
+ config_cmd->config_table[i].vlan_id = 0;
+ config_cmd->config_table[i].pf_id = BP_FUNC(bp);
+ config_cmd->config_table[i].clients_bit_vector =
+ cpu_to_le32(1 << BP_L_ID(bp));
+
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_SET);
+
+ DP(NETIF_MSG_IFUP,
+ "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
+ config_cmd->config_table[i].msb_mac_addr,
+ config_cmd->config_table[i].middle_mac_addr,
+ config_cmd->config_table[i].lsb_mac_addr);
+
+ i++;
+
+ /* Set uc MAC in NIG */
+ bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
+ LLH_CAM_ETH_LINE + i);
+ }
+ old = config_cmd->hdr.length;
+ if (old > i) {
+ for (; i < old; i++) {
+ if (CAM_IS_INVALID(config_cmd->
+ config_table[i])) {
+ /* already invalidated */
+ break;
+ }
+ /* invalidate */
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+ }
+ }
+
+ wmb();
+
+ config_cmd->hdr.length = i;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ /* Mark that this ramrod doesn't use bp->set_mac_pending for
+ * synchronization.
+ */
+ config_cmd->hdr.echo = 0;
+
+ mb();
+
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+}
+
+void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+{
+ int i;
+ struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+ dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+ int ramrod_flags = WAIT_RAMROD_COMMON;
+ u8 offset = bnx2x_uc_list_cam_offset(bp);
+ u8 max_list_size = bnx2x_max_uc_list(bp);
+
+ for (i = 0; i < max_list_size; i++) {
+ SET_FLAG(config_cmd->config_table[i].flags,
+ MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+ T_ETH_MAC_COMMAND_INVALIDATE);
+ bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
+ }
+
+ wmb();
+
+ config_cmd->hdr.length = max_list_size;
+ config_cmd->hdr.offset = offset;
+ config_cmd->hdr.client_id = 0xff;
+ /* We'll wait for a completion this time... */
+ config_cmd->hdr.echo = 1;
+
+ bp->set_mac_pending = 1;
+
+ mb();
+
+ bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+ U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+ /* Wait for a completion */
+ bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
+ ramrod_flags);
+
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+ /* some multicasts */
+ if (CHIP_IS_E1(bp)) {
+ return bnx2x_set_e1_mc_list(bp);
+ } else { /* E1H and newer */
+ return bnx2x_set_e1h_mc_list(bp);
+ }
+}
+
/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
u32 rx_mode = BNX2X_RX_MODE_NORMAL;
- int port = BP_PORT(bp);
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8842,47 +9205,16 @@ void bnx2x_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if ((dev->flags & IFF_ALLMULTI) ||
- ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
- CHIP_IS_E1(bp)))
+ else if (dev->flags & IFF_ALLMULTI)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- else { /* some multicasts */
- if (CHIP_IS_E1(bp)) {
- /*
- * set mc list, do not wait as wait implies sleep
- * and set_rx_mode can be invoked from non-sleepable
- * context
- */
- u8 offset = (CHIP_REV_IS_SLOW(bp) ?
- BNX2X_MAX_EMUL_MULTI*(1 + port) :
- BNX2X_MAX_MULTICAST*(1 + port));
-
- bnx2x_set_e1_mc_list(bp, offset);
- } else { /* E1H */
- /* Accept one or more multicasts */
- struct netdev_hw_addr *ha;
- u32 mc_filter[MC_HASH_SIZE];
- u32 crc, bit, regidx;
- int i;
-
- memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
- netdev_for_each_mc_addr(ha, dev) {
- DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
- bnx2x_mc_addr(ha));
-
- crc = crc32c_le(0, bnx2x_mc_addr(ha),
- ETH_ALEN);
- bit = (crc >> 24) & 0xff;
- regidx = bit >> 5;
- bit &= 0x1f;
- mc_filter[regidx] |= (1 << bit);
- }
+ else {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp))
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
- for (i = 0; i < MC_HASH_SIZE; i++)
- REG_WR(bp, MC_HASH_OFFSET(bp, i),
- mc_filter[i]);
- }
+ /* some unicasts */
+ if (bnx2x_set_uc_list(bp))
+ rx_mode = BNX2X_RX_MODE_PROMISC;
}
bp->rx_mode = rx_mode;
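
The open-coded E1H hash removed by this hunk (the logic now lives in bnx2x_set_e1h_mc_list()) places each multicast MAC into one of 256 filter bins spread over eight 32-bit registers. A standalone sketch of the bin selection, mirroring the removed lines; crc32c_le() is the kernel routine those lines used, and mc_hash_set() is a hypothetical name:

    /* bin selection for the 256-bit multicast hash filter */
    static void mc_hash_set(u32 mc_filter[8], const u8 *mac)
    {
        u32 crc = crc32c_le(0, mac, ETH_ALEN); /* CRC over 6 bytes */
        u32 bit = (crc >> 24) & 0xff;          /* top byte -> bin 0..255 */
        u32 regidx = bit >> 5;                 /* one of 8 u32 registers */

        bit &= 0x1f;                           /* bit within that register */
        mc_filter[regidx] |= 1 << bit;
    }
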
@@ -8963,7 +9295,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_stop = bnx2x_close,
.ndo_start_xmit = bnx2x_start_xmit,
.ndo_select_queue = bnx2x_select_queue,
- .ndo_set_multicast_list = bnx2x_set_rx_mode,
+ .ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = bnx2x_ioctl,
@@ -9789,15 +10121,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
HW_CID(bp, BNX2X_ISCSI_ETH_CID));
}
- /* There may be not more than 8 L2 and COMMON SPEs and not more
- * than 8 L5 SPEs in the air.
+ /* There may be no more than 8 L2 and no more than 8 L5 SPEs
+ * in the air. We also check that the number of outstanding
+ * COMMON ramrods does not exceed what the EQ and SPQ can
+ * accommodate.
*/
- if ((type == NONE_CONNECTION_TYPE) ||
- (type == ETH_CONNECTION_TYPE)) {
- if (!atomic_read(&bp->spq_left))
+ if (type == ETH_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->cq_spq_left))
break;
else
- atomic_dec(&bp->spq_left);
+ atomic_dec(&bp->cq_spq_left);
+ } else if (type == NONE_CONNECTION_TYPE) {
+ if (!atomic_read(&bp->eq_spq_left))
+ break;
+ else
+ atomic_dec(&bp->eq_spq_left);
} else if ((type == ISCSI_CONNECTION_TYPE) ||
(type == FCOE_CONNECTION_TYPE)) {
if (bp->cnic_spq_pending >=
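
This hunk splits the old single spq_left credit pool into cq_spq_left for L2 ramrods and eq_spq_left for COMMON ramrods (per the comment above, COMMON completions must fit the EQ as well as the SPQ). The check-then-decrement shape it uses, as a sketch; the helper name is hypothetical, and it assumes posting is serialized by the caller as it is in the driver:

    static int spe_credit_take(atomic_t *pool)
    {
        if (!atomic_read(pool))
            return 0;   /* pool exhausted - stop posting */
        atomic_dec(pool);
        return 1;       /* credit consumed */
    }
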
@@ -9875,7 +10213,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
int rc = 0;
mutex_lock(&bp->cnic_mutex);
- c_ops = bp->cnic_ops;
+ c_ops = rcu_dereference_protected(bp->cnic_ops,
+ lockdep_is_held(&bp->cnic_mutex));
if (c_ops)
rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
mutex_unlock(&bp->cnic_mutex);
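
rcu_dereference_protected() is the update-side counterpart of rcu_dereference(): it skips the read-side ordering and instead lets lockdep verify that the stated lock is really held. The two sides of the pattern, sketched against the fields used in this hunk:

    /* reader side - no mutex, must be inside an RCU read section */
    rcu_read_lock();
    c_ops = rcu_dereference(bp->cnic_ops);
    if (c_ops)
        c_ops->cnic_ctl(bp->cnic_data, ctl);
    rcu_read_unlock();

    /* update side - what the hunk switches to; lockdep can now
     * check that cnic_mutex is held at this point
     */
    c_ops = rcu_dereference_protected(bp->cnic_ops,
                                      lockdep_is_held(&bp->cnic_mutex));
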
@@ -9989,7 +10328,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
int count = ctl->data.credit.credit_count;
smp_mb__before_atomic_inc();
- atomic_add(count, &bp->spq_left);
+ atomic_add(count, &bp->cq_spq_left);
smp_mb__after_atomic_inc();
break;
}
@@ -10085,6 +10424,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ /* If both iSCSI and FCoE are disabled, return NULL to
+ * indicate to CNIC that it should not try to work with
+ * this device.
+ */
+ if (NO_ISCSI(bp) && NO_FCOE(bp))
+ return NULL;
+
cp->drv_owner = THIS_MODULE;
cp->chip_id = CHIP_ID(bp);
cp->pdev = bp->pdev;
@@ -10105,6 +10451,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+ if (NO_ISCSI(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+ if (NO_FCOE(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
"starting cid %d\n",
cp->ctx_blk_size,
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index e01330b..1c89f19 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6083,6 +6083,7 @@ The other bits are reserved and should be zero*/
#define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808
#define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e
#define MDIO_PMA_REG_8727_PCS_GP 0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4
#define MDIO_AN_REG_8727_MISC_CTRL 0x8309
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 163e0b0..77e3c6a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1372,8 +1372,8 @@ static int bond_compute_features(struct bonding *bond)
{
struct slave *slave;
struct net_device *bond_dev = bond->dev;
- unsigned long features = bond_dev->features;
- unsigned long vlan_features = 0;
+ u32 features = bond_dev->features;
+ u32 vlan_features = 0;
unsigned short max_hard_header_len = max((u16)ETH_HLEN,
bond_dev->hard_header_len);
int i;
@@ -1400,8 +1400,8 @@ static int bond_compute_features(struct bonding *bond)
done:
features |= (bond_dev->features & BOND_VLAN_FEATURES);
- bond_dev->features = netdev_fix_features(features, NULL);
- bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL);
+ bond_dev->features = netdev_fix_features(bond_dev, features);
+ bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
bond_dev->hard_header_len = max_hard_header_len;
return 0;
@@ -1594,9 +1594,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
}
- res = netdev_set_master(slave_dev, bond_dev);
+ res = netdev_set_bond_master(slave_dev, bond_dev);
if (res) {
- pr_debug("Error %d calling netdev_set_master\n", res);
+ pr_debug("Error %d calling netdev_set_bond_master\n", res);
goto err_restore_mac;
}
/* open the slave since the application closed it */
@@ -1812,7 +1812,7 @@ err_close:
dev_close(slave_dev);
err_unset_master:
- netdev_set_master(slave_dev, NULL);
+ netdev_set_bond_master(slave_dev, NULL);
err_restore_mac:
if (!bond->params.fail_over_mac) {
@@ -1992,7 +1992,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_set_master(slave_dev, NULL);
+ netdev_set_bond_master(slave_dev, NULL);
#ifdef CONFIG_NET_POLL_CONTROLLER
read_lock_bh(&bond->lock);
@@ -2114,7 +2114,7 @@ static int bond_release_all(struct net_device *bond_dev)
netif_addr_unlock_bh(bond_dev);
}
- netdev_set_master(slave_dev, NULL);
+ netdev_set_bond_master(slave_dev, NULL);
/* close slave before restoring its mac address */
dev_close(slave_dev);
@@ -4657,6 +4657,8 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_netpoll_cleanup = bond_netpoll_cleanup,
.ndo_poll_controller = bond_poll_controller,
#endif
+ .ndo_add_slave = bond_enslave,
+ .ndo_del_slave = bond_release,
};
static void bond_destructor(struct net_device *bond_dev)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8fd0174..72bb0f6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1198,7 +1198,7 @@ static ssize_t bonding_store_carrier(struct device *d,
bond->dev->name, new_value);
}
out:
- return count;
+ return ret;
}
static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
bonding_show_carrier, bonding_store_carrier);
@@ -1595,7 +1595,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
}
}
out:
- return count;
+ return ret;
}
static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
bonding_show_slaves_active, bonding_store_slaves_active);
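
Both bond_sysfs.c fixes have the same shape: a sysfs store method must return either the number of bytes consumed or a negative errno, and the `out:` path was unconditionally returning `count`, so parse errors were silently reported as success. A hypothetical store method showing the corrected flow:

    static ssize_t example_store(struct device *d,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
    {
        int new_value, ret = count;

        if (sscanf(buf, "%d", &new_value) != 1 ||
            (new_value != 0 && new_value != 1)) {
            ret = -EINVAL;      /* userspace now sees the error */
            goto out;
        }
        /* ... apply new_value to the bonding device ... */
    out:
        return ret;             /* was: return count */
    }
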
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 5dec456..1d699e3 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -115,6 +115,8 @@ source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
+source "drivers/net/can/c_can/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 53c82a7..24ebfe8 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -13,6 +13,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
+obj-$(CONFIG_CAN_C_CAN) += c_can/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644
index 0000000..ffb9773
--- /dev/null
+++ b/drivers/net/can/c_can/Kconfig
@@ -0,0 +1,15 @@
+menuconfig CAN_C_CAN
+ tristate "Bosch C_CAN devices"
+ depends on CAN_DEV && HAS_IOMEM
+
+if CAN_C_CAN
+
+config CAN_C_CAN_PLATFORM
+ tristate "Generic Platform Bus based C_CAN driver"
+ ---help---
+ This driver adds support for the C_CAN chips connected to
+ the "platform bus" (the Linux abstraction for devices
+ attached directly to the processor), which can be found on
+ various boards from ST Microelectronics (http://www.st.com),
+ such as the SPEAr1310 and SPEAr320 evaluation boards.
+endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644
index 0000000..9273f6d
--- /dev/null
+++ b/drivers/net/can/c_can/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the Bosch C_CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_C_CAN) += c_can.o
+obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644
index 0000000..14050786
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.c
@@ -0,0 +1,1158 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * TX and RX NAPI implementation has been borrowed from at91 CAN driver
+ * written by:
+ * Copyright
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/io.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "c_can.h"
+
+/* control register */
+#define CONTROL_TEST BIT(7)
+#define CONTROL_CCE BIT(6)
+#define CONTROL_DISABLE_AR BIT(5)
+#define CONTROL_ENABLE_AR (0 << 5)
+#define CONTROL_EIE BIT(3)
+#define CONTROL_SIE BIT(2)
+#define CONTROL_IE BIT(1)
+#define CONTROL_INIT BIT(0)
+
+/* test register */
+#define TEST_RX BIT(7)
+#define TEST_TX1 BIT(6)
+#define TEST_TX2 BIT(5)
+#define TEST_LBACK BIT(4)
+#define TEST_SILENT BIT(3)
+#define TEST_BASIC BIT(2)
+
+/* status register */
+#define STATUS_BOFF BIT(7)
+#define STATUS_EWARN BIT(6)
+#define STATUS_EPASS BIT(5)
+#define STATUS_RXOK BIT(4)
+#define STATUS_TXOK BIT(3)
+
+/* error counter register */
+#define ERR_CNT_TEC_MASK 0xff
+#define ERR_CNT_TEC_SHIFT 0
+#define ERR_CNT_REC_SHIFT 8
+#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT)
+#define ERR_CNT_RP_SHIFT 15
+#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT)
+
+/* bit-timing register */
+#define BTR_BRP_MASK 0x3f
+#define BTR_BRP_SHIFT 0
+#define BTR_SJW_SHIFT 6
+#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT)
+#define BTR_TSEG1_SHIFT 8
+#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT)
+#define BTR_TSEG2_SHIFT 12
+#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
+
+/* brp extension register */
+#define BRP_EXT_BRPE_MASK 0x0f
+#define BRP_EXT_BRPE_SHIFT 0
+
+/* IFx command request */
+#define IF_COMR_BUSY BIT(15)
+
+/* IFx command mask */
+#define IF_COMM_WR BIT(7)
+#define IF_COMM_MASK BIT(6)
+#define IF_COMM_ARB BIT(5)
+#define IF_COMM_CONTROL BIT(4)
+#define IF_COMM_CLR_INT_PND BIT(3)
+#define IF_COMM_TXRQST BIT(2)
+#define IF_COMM_DATAA BIT(1)
+#define IF_COMM_DATAB BIT(0)
+#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \
+ IF_COMM_CONTROL | IF_COMM_TXRQST | \
+ IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* IFx arbitration */
+#define IF_ARB_MSGVAL BIT(15)
+#define IF_ARB_MSGXTD BIT(14)
+#define IF_ARB_TRANSMIT BIT(13)
+
+/* IFx message control */
+#define IF_MCONT_NEWDAT BIT(15)
+#define IF_MCONT_MSGLST BIT(14)
+#define IF_MCONT_CLR_MSGLST (0 << 14)
+#define IF_MCONT_INTPND BIT(13)
+#define IF_MCONT_UMASK BIT(12)
+#define IF_MCONT_TXIE BIT(11)
+#define IF_MCONT_RXIE BIT(10)
+#define IF_MCONT_RMTEN BIT(9)
+#define IF_MCONT_TXRQST BIT(8)
+#define IF_MCONT_EOB BIT(7)
+#define IF_MCONT_DLC_MASK 0xf
+
+/*
+ * IFx register masks:
+ * allow easy operation on 16-bit registers when the
+ * argument is 32-bit instead
+ */
+#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF)
+#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)
+
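
A quick check of what the two macros produce; a hypothetical userspace sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define IFX_WRITE_LOW_16BIT(x)  ((x) & 0xFFFF)
    #define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16)

    int main(void)
    {
        uint32_t id = 0x1abcd123;   /* arbitrary 32-bit value */

        /* one 32-bit quantity becomes two 16-bit register writes:
         * prints low=d123 high=1abc
         */
        printf("low=%04x high=%04x\n",
               IFX_WRITE_LOW_16BIT(id), IFX_WRITE_HIGH_16BIT(id));
        return 0;
    }
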
+/* message object split */
+#define C_CAN_NO_OF_OBJECTS 32
+#define C_CAN_MSG_OBJ_RX_NUM 16
+#define C_CAN_MSG_OBJ_TX_NUM 16
+
+#define C_CAN_MSG_OBJ_RX_FIRST 1
+#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \
+ C_CAN_MSG_OBJ_RX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1)
+#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \
+ C_CAN_MSG_OBJ_TX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_RX_SPLIT 9
+#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1)
+
+#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)
+#define RECEIVE_OBJECT_BITS 0x0000ffff
+
+/* status interrupt */
+#define STATUS_INTERRUPT 0x8000
+
+/* global interrupt masks */
+#define ENABLE_ALL_INTERRUPTS 1
+#define DISABLE_ALL_INTERRUPTS 0
+
+/* minimum timeout for checking BUSY status */
+#define MIN_TIMEOUT_VALUE 6
+
+/* napi related */
+#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM
+
+/* c_can lec values */
+enum c_can_lec_type {
+ LEC_NO_ERROR = 0,
+ LEC_STUFF_ERROR,
+ LEC_FORM_ERROR,
+ LEC_ACK_ERROR,
+ LEC_BIT1_ERROR,
+ LEC_BIT0_ERROR,
+ LEC_CRC_ERROR,
+ LEC_UNUSED,
+};
+
+/*
+ * c_can error types:
+ * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
+ */
+enum c_can_bus_error_types {
+ C_CAN_NO_ERROR = 0,
+ C_CAN_BUS_OFF,
+ C_CAN_ERROR_WARNING,
+ C_CAN_ERROR_PASSIVE,
+};
+
+static struct can_bittiming_const c_can_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
+ .tseg1_max = 16,
+ .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field*/
+ .brp_inc = 1,
+};
+
+static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
+{
+ return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
+ C_CAN_MSG_OBJ_TX_FIRST;
+}
+
+static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
+{
+ return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
+ C_CAN_MSG_OBJ_TX_FIRST;
+}
+
+static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
+{
+ u32 val = priv->read_reg(priv, reg);
+ val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
+ return val;
+}
+
+static void c_can_enable_all_interrupts(struct c_can_priv *priv,
+ int enable)
+{
+ unsigned int cntrl_save = priv->read_reg(priv,
+ &priv->regs->control);
+
+ if (enable)
+ cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
+ else
+ cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+
+ priv->write_reg(priv, &priv->regs->control, cntrl_save);
+}
+
+static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+{
+ int count = MIN_TIMEOUT_VALUE;
+
+ while (count && priv->read_reg(priv,
+ &priv->regs->ifregs[iface].com_req) &
+ IF_COMR_BUSY) {
+ count--;
+ udelay(1);
+ }
+
+ if (!count)
+ return 1;
+
+ return 0;
+}
+
+static inline void c_can_object_get(struct net_device *dev,
+ int iface, int objno, int mask)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /*
+ * As per the specs, after writing the message object number to
+ * the IF command request register, the transfer between the
+ * interface register and message RAM must complete within
+ * 6 CAN-CLK periods.
+ */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ IFX_WRITE_LOW_16BIT(mask));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ IFX_WRITE_LOW_16BIT(objno));
+
+ if (c_can_msg_obj_is_busy(priv, iface))
+ netdev_err(dev, "timed out in object get\n");
+}
+
+static inline void c_can_object_put(struct net_device *dev,
+ int iface, int objno, int mask)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /*
+ * As per the specs, after writing the message object number to
+ * the IF command request register, the transfer between the
+ * interface register and message RAM must complete within
+ * 6 CAN-CLK periods.
+ */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ IFX_WRITE_LOW_16BIT(objno));
+
+ if (c_can_msg_obj_is_busy(priv, iface))
+ netdev_err(dev, "timed out in object put\n");
+}
+
+static void c_can_write_msg_object(struct net_device *dev,
+ int iface, struct can_frame *frame, int objno)
+{
+ int i;
+ u16 flags = 0;
+ unsigned int id;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ if (!(frame->can_id & CAN_RTR_FLAG))
+ flags |= IF_ARB_TRANSMIT;
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ id = frame->can_id & CAN_EFF_MASK;
+ flags |= IF_ARB_MSGXTD;
+ } else
+ id = ((frame->can_id & CAN_SFF_MASK) << 18);
+
+ flags |= IF_ARB_MSGVAL;
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ IFX_WRITE_LOW_16BIT(id));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
+ IFX_WRITE_HIGH_16BIT(id));
+
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
+ frame->data[i] | (frame->data[i + 1] << 8));
+ }
+
+ /* enable interrupt for this message object */
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
+ frame->can_dlc);
+ c_can_object_put(dev, iface, objno, IF_COMM_ALL);
+}
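
The shift by 18 above follows from the arbitration field being 29 bits wide: an extended ID uses all 29 bits, while an 11-bit standard ID occupies the top bits (29 - 11 = 18). A worked example as a userspace sketch; CAN_SFF_MASK is copied from <linux/can.h>:

    #include <stdint.h>
    #include <stdio.h>

    #define CAN_SFF_MASK 0x000007ffU    /* 11-bit standard frame ID */

    int main(void)
    {
        uint32_t can_id = 0x123;        /* example standard-frame ID */
        uint32_t arb = (can_id & CAN_SFF_MASK) << 18;

        /* arb1 takes the low half, arb2 the high half plus flags:
         * prints arb=0x048c0000 low=0x0000 high=0x048c
         */
        printf("arb=0x%08x low=0x%04x high=0x%04x\n",
               arb, arb & 0xffff, arb >> 16);
        return 0;
    }
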
+
+static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
+ int iface, int ctrl_mask,
+ int obj)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
+ c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+
+}
+
+static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
+ int iface,
+ int ctrl_mask)
+{
+ int i;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ ctrl_mask & ~(IF_MCONT_MSGLST |
+ IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+ c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
+ }
+}
+
+static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
+ int iface, int ctrl_mask,
+ int obj)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ ctrl_mask & ~(IF_MCONT_MSGLST |
+ IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+ c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+}
+
+static void c_can_handle_lost_msg_obj(struct net_device *dev,
+ int iface, int objno)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ netdev_err(dev, "msg lost in buffer %d\n", objno);
+
+ c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ IF_MCONT_CLR_MSGLST);
+
+ c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
+
+ /* create an error msg */
+ skb = alloc_can_err_skb(dev, &frame);
+ if (unlikely(!skb))
+ return;
+
+ frame->can_id |= CAN_ERR_CRTL;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_errors++;
+ stats->rx_over_errors++;
+
+ netif_receive_skb(skb);
+}
+
+static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+{
+ u16 flags, data;
+ int i;
+ unsigned int val;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ skb = alloc_can_skb(dev, &frame);
+ if (!skb) {
+ stats->rx_dropped++;
+ return -ENOMEM;
+ }
+
+ frame->can_dlc = get_can_dlc(ctrl & 0x0F);
+
+ flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
+ val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
+ (flags << 16);
+
+ if (flags & IF_ARB_MSGXTD)
+ frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ frame->can_id = (val >> 18) & CAN_SFF_MASK;
+
+ if (flags & IF_ARB_TRANSMIT)
+ frame->can_id |= CAN_RTR_FLAG;
+ else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ data = priv->read_reg(priv,
+ &priv->regs->ifregs[iface].data[i / 2]);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
+ }
+
+ netif_receive_skb(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += frame->can_dlc;
+
+ return 0;
+}
+
+static void c_can_setup_receive_object(struct net_device *dev, int iface,
+ int objno, unsigned int mask,
+ unsigned int id, unsigned int mcont)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+ IFX_WRITE_LOW_16BIT(mask));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+ IFX_WRITE_HIGH_16BIT(mask));
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ IFX_WRITE_LOW_16BIT(id));
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
+ (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
+ c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+ netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+ c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
+ priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
+ priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
+
+ c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
+
+ netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+ c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
+{
+ int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+
+ /*
+ * as bit n-1 of the transmission request register corresponds
+ * to message object n, we must account for this offset.
+ */
+ if (val & (1 << (objno - 1)))
+ return 1;
+
+ return 0;
+}
+
+static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ u32 msg_obj_no;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct can_frame *frame = (struct can_frame *)skb->data;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ msg_obj_no = get_tx_next_msg_obj(priv);
+
+ /* prepare message object for transmission */
+ c_can_write_msg_object(dev, 0, frame, msg_obj_no);
+ can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+
+ /*
+ * we have to stop the queue in case of a wrap around or
+ * if the next TX message object is still in use
+ */
+ priv->tx_next++;
+ if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
+ (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+ netif_stop_queue(dev);
+
+ return NETDEV_TX_OK;
+}
+
+static int c_can_set_bittiming(struct net_device *dev)
+{
+ unsigned int reg_btr, reg_brpe, ctrl_save;
+ u8 brp, brpe, sjw, tseg1, tseg2;
+ u32 ten_bit_brp;
+ struct c_can_priv *priv = netdev_priv(dev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+
+ /* c_can provides 6-bit BRP and 4-bit BRPE fields */
+ ten_bit_brp = bt->brp - 1;
+ brp = ten_bit_brp & BTR_BRP_MASK;
+ brpe = ten_bit_brp >> 6;
+
+ sjw = bt->sjw - 1;
+ tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+ tseg2 = bt->phase_seg2 - 1;
+ reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
+ (tseg2 << BTR_TSEG2_SHIFT);
+ reg_brpe = brpe & BRP_EXT_BRPE_MASK;
+
+ netdev_info(dev,
+ "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
+
+ ctrl_save = priv->read_reg(priv, &priv->regs->control);
+ priv->write_reg(priv, &priv->regs->control,
+ ctrl_save | CONTROL_CCE | CONTROL_INIT);
+ priv->write_reg(priv, &priv->regs->btr, reg_btr);
+ priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
+ priv->write_reg(priv, &priv->regs->control, ctrl_save);
+
+ return 0;
+}
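
The 10-bit prescaler handled above is split across two registers: the low 6 bits go into the BTR BRP field and the high 4 bits into the BRP extension register, which is why brp_max in the bittiming constants is 1024 (2^10). A worked example of the packing, as a userspace sketch with hypothetical timing values:

    #include <stdint.h>
    #include <stdio.h>

    #define BTR_BRP_MASK        0x3f
    #define BTR_SJW_SHIFT       6
    #define BTR_TSEG1_SHIFT     8
    #define BTR_TSEG2_SHIFT     12
    #define BRP_EXT_BRPE_MASK   0x0f

    int main(void)
    {
        /* hypothetical timing: brp=100, sjw=1, tseg1=13, tseg2=2 */
        uint32_t ten_bit_brp = 100 - 1;             /* 99 = 0b0001100011 */
        uint32_t brp  = ten_bit_brp & BTR_BRP_MASK; /* low 6 bits: 0x23 */
        uint32_t brpe = ten_bit_brp >> 6;           /* high 4 bits: 0x01 */
        uint32_t reg_btr = brp | ((1 - 1) << BTR_SJW_SHIFT) |
                           ((13 - 1) << BTR_TSEG1_SHIFT) |
                           ((2 - 1) << BTR_TSEG2_SHIFT);

        /* prints: BTR=1c23 BRPE=0001 */
        printf("BTR=%04x BRPE=%04x\n", reg_btr, brpe & BRP_EXT_BRPE_MASK);
        return 0;
    }
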
+
+/*
+ * Configure C_CAN message objects for Tx and Rx purposes:
+ * C_CAN provides a total of 32 message objects that can be configured
+ * either for Tx or Rx purposes. Here the first 16 message objects are used as
+ * a reception FIFO. The end of the reception FIFO is signified by the EoB bit
+ * being SET. The remaining 16 message objects are kept aside for Tx purposes.
+ * See user guide document for further details on configuring message
+ * objects.
+ */
+static void c_can_configure_msg_objects(struct net_device *dev)
+{
+ int i;
+
+ /* first invalidate all message objects */
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
+ c_can_inval_msg_object(dev, 0, i);
+
+ /* setup receive message objects */
+ for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
+ c_can_setup_receive_object(dev, 0, i, 0, 0,
+ (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+
+ c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
+ IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+}
+
+/*
+ * Configure C_CAN chip:
+ * - enable/disable auto-retransmission
+ * - set operating mode
+ * - configure message objects
+ */
+static void c_can_chip_config(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ /* disable automatic retransmission */
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_DISABLE_AR);
+ else
+ /* enable automatic retransmission */
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_ENABLE_AR);
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
+ /* loopback + silent mode : useful for hot self-test */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, &priv->regs->test,
+ TEST_LBACK | TEST_SILENT);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ /* loopback mode : useful for self-test function */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ /* silent mode : bus-monitoring mode */
+ priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+ priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
+ } else
+ /* normal mode */
+ priv->write_reg(priv, &priv->regs->control,
+ CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+
+ /* configure message objects */
+ c_can_configure_msg_objects(dev);
+
+ /* set a `lec` value so that we can check for updates later */
+ priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+ /* set bittiming params */
+ c_can_set_bittiming(dev);
+}
+
+static void c_can_start(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* enable status change, error and module interrupts */
+ c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+
+ /* basic c_can configuration */
+ c_can_chip_config(dev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* reset tx helper pointers */
+ priv->tx_next = priv->tx_echo = 0;
+}
+
+static void c_can_stop(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* disable all interrupts */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+ /* set the state as STOPPED */
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ c_can_start(dev);
+ netif_wake_queue(dev);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ unsigned int reg_err_counter;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
+ ERR_CNT_REC_SHIFT;
+ bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
+
+ return 0;
+}
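
The field extraction above, applied to a concrete (hypothetical) register value; a userspace sketch with the masks copied from the defines near the top of the file:

    #include <stdint.h>
    #include <stdio.h>

    #define ERR_CNT_TEC_MASK    0xff
    #define ERR_CNT_REC_SHIFT   8
    #define ERR_CNT_REC_MASK    (0x7f << ERR_CNT_REC_SHIFT)
    #define ERR_CNT_RP_SHIFT    15
    #define ERR_CNT_RP_MASK     (0x1 << ERR_CNT_RP_SHIFT)

    int main(void)
    {
        uint16_t reg = 0x8523;  /* hypothetical error counter reading */

        unsigned rp  = (reg & ERR_CNT_RP_MASK) >> ERR_CNT_RP_SHIFT;
        unsigned rec = (reg & ERR_CNT_REC_MASK) >> ERR_CNT_REC_SHIFT;
        unsigned tec = reg & ERR_CNT_TEC_MASK;

        /* prints: rp=1 rec=5 tec=35 - the receiver is error passive,
         * REC=5, TEC=35
         */
        printf("rp=%u rec=%u tec=%u\n", rp, rec, tec);
        return 0;
    }
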
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next and check if the
+ * packet has been transmitted, echo it back to the CAN framework.
+ * If we discover a not yet transmitted packet, stop looking for more.
+ */
+static void c_can_do_tx(struct net_device *dev)
+{
+ u32 val;
+ u32 msg_obj_no;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+
+ for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+ msg_obj_no = get_tx_echo_msg_obj(priv);
+ c_can_inval_msg_object(dev, 0, msg_obj_no);
+ val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+ if (!(val & (1 << (msg_obj_no - 1))))
+ can_get_echo_skb(dev,
+ msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+ stats->tx_bytes += priv->read_reg(priv,
+ &priv->regs->ifregs[0].msg_cntrl)
+ & IF_MCONT_DLC_MASK;
+ stats->tx_packets++;
+ }
+ }
+
+ /* restart queue if wrap-up or if queue stalled on last pkt */
+ if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
+ ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
+ netif_wake_queue(dev);
+}
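
tx_next and tx_echo are free-running counters; masking with C_CAN_NEXT_MSG_OBJ_MASK (15) maps them onto the 16 Tx objects 17..32, so the ring wraps without any modulo. A quick demonstration as a userspace sketch:

    #include <stdio.h>

    #define C_CAN_MSG_OBJ_TX_NUM    16
    #define C_CAN_MSG_OBJ_TX_FIRST  17
    #define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1)

    int main(void)
    {
        unsigned tx;

        /* prints 14->31, 15->32, 16->17, 17->18: the object number
         * wraps back to 17 exactly when (tx & mask) == 0, which is
         * the wrap condition the xmit path stops the queue on
         */
        for (tx = 14; tx < 18; tx++)
            printf("tx_next=%u -> obj %u\n", tx,
                   (tx & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST);
        return 0;
    }
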
+
+/*
+ * theory of operation:
+ *
+ * the c_can core saves a received CAN message into the first free
+ * message object it finds (starting with the lowest). Bits NEWDAT and
+ * INTPND are set for this message object, indicating that a new message
+ * has arrived. To keep reception in order despite this, we maintain two
+ * groups of message objects whose partitioning is defined by
+ * C_CAN_MSG_OBJ_RX_SPLIT.
+ *
+ * To ensure in-order frame reception we use the following
+ * approach while re-activating a message object to receive further
+ * frames:
+ * - if the current message object number is lower than
+ * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
+ * the INTPND bit.
+ * - if the current message object number is equal to
+ * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
+ * receive message objects.
+ * - if the current message object number is greater than
+ * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
+ * only this message object.
+ */
+static int c_can_do_rx_poll(struct net_device *dev, int quota)
+{
+ u32 num_rx_pkts = 0;
+ unsigned int msg_obj, msg_ctrl_save;
+ struct c_can_priv *priv = netdev_priv(dev);
+ u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
+
+ for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
+ msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
+ val = c_can_read_reg32(priv, &priv->regs->intpnd1),
+ msg_obj++) {
+ /*
+ * as bit n-1 of the interrupt pending register corresponds
+ * to message object n, we must account for this offset.
+ */
+ if (val & (1 << (msg_obj - 1))) {
+ c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
+ ~IF_COMM_TXRQST);
+ msg_ctrl_save = priv->read_reg(priv,
+ &priv->regs->ifregs[0].msg_cntrl);
+
+ if (msg_ctrl_save & IF_MCONT_EOB)
+ return num_rx_pkts;
+
+ if (msg_ctrl_save & IF_MCONT_MSGLST) {
+ c_can_handle_lost_msg_obj(dev, 0, msg_obj);
+ num_rx_pkts++;
+ quota--;
+ continue;
+ }
+
+ if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+ continue;
+
+ /* read the data from the message object */
+ c_can_read_msg_object(dev, 0, msg_ctrl_save);
+
+ if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
+ c_can_mark_rx_msg_obj(dev, 0,
+ msg_ctrl_save, msg_obj);
+ else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
+ /* activate this msg obj */
+ c_can_activate_rx_msg_obj(dev, 0,
+ msg_ctrl_save, msg_obj);
+ else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
+ /* activate all lower message objects */
+ c_can_activate_all_lower_rx_msg_obj(dev,
+ 0, msg_ctrl_save);
+
+ num_rx_pkts++;
+ quota--;
+ }
+ }
+
+ return num_rx_pkts;
+}
+
+static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
+{
+ return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ (priv->current_status & LEC_UNUSED);
+}
+
+static int c_can_handle_state_change(struct net_device *dev,
+ enum c_can_bus_error_types error_type)
+{
+ unsigned int reg_err_counter;
+ unsigned int rx_err_passive;
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct can_berr_counter bec;
+
+ /* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ c_can_get_berr_counter(dev, &bec);
+ reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
+ ERR_CNT_RP_SHIFT;
+
+ switch (error_type) {
+ case C_CAN_ERROR_WARNING:
+ /* error warning state */
+ priv->can.can_stats.error_warning++;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+
+ break;
+ case C_CAN_ERROR_PASSIVE:
+ /* error passive state */
+ priv->can.can_stats.error_passive++;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ if (rx_err_passive)
+ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ if (bec.txerr > 127)
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
+ case C_CAN_BUS_OFF:
+ /* bus-off state */
+ priv->can.state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ /*
+ * disable all interrupts in bus-off mode to ensure that
+ * the CPU is not bogged down
+ */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+ can_bus_off(dev);
+ break;
+ default:
+ break;
+ }
+
+ netif_receive_skb(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 1;
+}
+
+static int c_can_handle_bus_err(struct net_device *dev,
+ enum c_can_lec_type lec_type)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ /*
+ * Early exit if there is no LEC update or no error.
+ * No LEC update means that no CAN bus event has been detected
+ * since the CPU wrote the 0x7 value to the status reg.
+ */
+ if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
+ return 0;
+
+ /* propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(dev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ /*
+ * check for 'last error code' which tells us the
+ * type of the last error to occur on the CAN bus
+ */
+
+ /* common for all type of bus errors */
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+
+ switch (lec_type) {
+ case LEC_STUFF_ERROR:
+ netdev_dbg(dev, "stuff error\n");
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case LEC_FORM_ERROR:
+ netdev_dbg(dev, "form error\n");
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case LEC_ACK_ERROR:
+ netdev_dbg(dev, "ack error\n");
+ cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL);
+ break;
+ case LEC_BIT1_ERROR:
+ netdev_dbg(dev, "bit1 error\n");
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+ case LEC_BIT0_ERROR:
+ netdev_dbg(dev, "bit0 error\n");
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+ case LEC_CRC_ERROR:
+ netdev_dbg(dev, "CRC error\n");
+ cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL);
+ break;
+ default:
+ break;
+ }
+
+ /* set a `lec` value so that we can check for updates later */
+ priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+ netif_receive_skb(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 1;
+}
+
+static int c_can_poll(struct napi_struct *napi, int quota)
+{
+ u16 irqstatus;
+ int lec_type = 0;
+ int work_done = 0;
+ struct net_device *dev = napi->dev;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ if (!irqstatus)
+ goto end;
+
+ /* status events have the highest priority */
+ if (irqstatus == STATUS_INTERRUPT) {
+ priv->current_status = priv->read_reg(priv,
+ &priv->regs->status);
+
+ /* handle Tx/Rx events */
+ if (priv->current_status & STATUS_TXOK)
+ priv->write_reg(priv, &priv->regs->status,
+ priv->current_status & ~STATUS_TXOK);
+
+ if (priv->current_status & STATUS_RXOK)
+ priv->write_reg(priv, &priv->regs->status,
+ priv->current_status & ~STATUS_RXOK);
+
+ /* handle state changes */
+ if ((priv->current_status & STATUS_EWARN) &&
+ (!(priv->last_status & STATUS_EWARN))) {
+ netdev_dbg(dev, "entered error warning state\n");
+ work_done += c_can_handle_state_change(dev,
+ C_CAN_ERROR_WARNING);
+ }
+ if ((priv->current_status & STATUS_EPASS) &&
+ (!(priv->last_status & STATUS_EPASS))) {
+ netdev_dbg(dev, "entered error passive state\n");
+ work_done += c_can_handle_state_change(dev,
+ C_CAN_ERROR_PASSIVE);
+ }
+ if ((priv->current_status & STATUS_BOFF) &&
+ (!(priv->last_status & STATUS_BOFF))) {
+ netdev_dbg(dev, "entered bus off state\n");
+ work_done += c_can_handle_state_change(dev,
+ C_CAN_BUS_OFF);
+ }
+
+ /* handle bus recovery events */
+ if ((!(priv->current_status & STATUS_BOFF)) &&
+ (priv->last_status & STATUS_BOFF)) {
+ netdev_dbg(dev, "left bus off state\n");
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+ if ((!(priv->current_status & STATUS_EPASS)) &&
+ (priv->last_status & STATUS_EPASS)) {
+ netdev_dbg(dev, "left error passive state\n");
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ priv->last_status = priv->current_status;
+
+ /* handle lec errors on the bus */
+ lec_type = c_can_has_and_handle_berr(priv);
+ if (lec_type)
+ work_done += c_can_handle_bus_err(dev, lec_type);
+ } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
+ (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
+ /* handle events corresponding to receive message objects */
+ work_done += c_can_do_rx_poll(dev, (quota - work_done));
+ } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
+ (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
+ /* handle events corresponding to transmit message objects */
+ c_can_do_tx(dev);
+ }
+
+end:
+ if (work_done < quota) {
+ napi_complete(napi);
+ /* enable all IRQs */
+ c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t c_can_isr(int irq, void *dev_id)
+{
+ u16 irqstatus;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ if (!irqstatus)
+ return IRQ_NONE;
+
+ /* disable all interrupts and schedule the NAPI */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int c_can_open(struct net_device *dev)
+{
+ int err;
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* open the can device */
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ return err;
+ }
+
+ /* register interrupt handler */
+ err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
+ dev);
+ if (err < 0) {
+ netdev_err(dev, "failed to request interrupt\n");
+ goto exit_irq_fail;
+ }
+
+ /* start the c_can controller */
+ c_can_start(dev);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+exit_irq_fail:
+ close_candev(dev);
+ return err;
+}
+
+static int c_can_close(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+ c_can_stop(dev);
+ free_irq(dev->irq, dev);
+ close_candev(dev);
+
+ return 0;
+}
+
+struct net_device *alloc_c_can_dev(void)
+{
+ struct net_device *dev;
+ struct c_can_priv *priv;
+
+ dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+ netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
+
+ priv->dev = dev;
+ priv->can.bittiming_const = &c_can_bittiming_const;
+ priv->can.do_set_mode = c_can_set_mode;
+ priv->can.do_get_berr_counter = c_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
+ CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING;
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_c_can_dev);
+
+void free_c_can_dev(struct net_device *dev)
+{
+ free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_c_can_dev);
+
+static const struct net_device_ops c_can_netdev_ops = {
+ .ndo_open = c_can_open,
+ .ndo_stop = c_can_close,
+ .ndo_start_xmit = c_can_start_xmit,
+};
+
+int register_c_can_dev(struct net_device *dev)
+{
+ dev->flags |= IFF_ECHO; /* we support local echo */
+ dev->netdev_ops = &c_can_netdev_ops;
+
+ return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_c_can_dev);
+
+void unregister_c_can_dev(struct net_device *dev)
+{
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ /* disable all interrupts */
+ c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+ unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_c_can_dev);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644
index 0000000..9b7fbef
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.h
@@ -0,0 +1,86 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef C_CAN_H
+#define C_CAN_H
+
+/* c_can IF registers */
+struct c_can_if_regs {
+ u16 com_req;
+ u16 com_mask;
+ u16 mask1;
+ u16 mask2;
+ u16 arb1;
+ u16 arb2;
+ u16 msg_cntrl;
+ u16 data[4];
+ u16 _reserved[13];
+};
+
+/* c_can hardware registers */
+struct c_can_regs {
+ u16 control;
+ u16 status;
+ u16 err_cnt;
+ u16 btr;
+ u16 interrupt;
+ u16 test;
+ u16 brp_ext;
+ u16 _reserved1;
+ struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
+ u16 _reserved2[8];
+ u16 txrqst1;
+ u16 txrqst2;
+ u16 _reserved3[6];
+ u16 newdat1;
+ u16 newdat2;
+ u16 _reserved4[6];
+ u16 intpnd1;
+ u16 intpnd2;
+ u16 _reserved5[6];
+ u16 msgval1;
+ u16 msgval2;
+ u16 _reserved6[6];
+};
+
+/* c_can private data structure */
+struct c_can_priv {
+ struct can_priv can; /* must be the first member */
+ struct napi_struct napi;
+ struct net_device *dev;
+ int tx_object;
+ int current_status;
+ int last_status;
+ u16 (*read_reg) (struct c_can_priv *priv, void *reg);
+ void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
+ struct c_can_regs __iomem *regs;
+ unsigned long irq_flags; /* for request_irq() */
+ unsigned int tx_next;
+ unsigned int tx_echo;
+ void *priv; /* for board-specific data */
+};
+
+struct net_device *alloc_c_can_dev(void);
+void free_c_can_dev(struct net_device *dev);
+int register_c_can_dev(struct net_device *dev);
+void unregister_c_can_dev(struct net_device *dev);
+
+#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644
index 0000000..e629b96
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -0,0 +1,215 @@
+/*
+ * Platform CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant with CAN protocol version 2.0 parts A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+/*
+ * The 16-bit c_can registers can be laid out differently in the memory
+ * map of different implementations: for example, they may be aligned to
+ * a 16-bit or a 32-bit boundary. Handle this by providing a common
+ * read/write interface.
+ */
+static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+ void *reg)
+{
+ return readw(reg);
+}
+
+static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+ void *reg, u16 val)
+{
+ writew(val, reg);
+}
+
+static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+ void *reg)
+{
+ return readw(reg + (long)reg - (long)priv->regs);
+}
+
+static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+ void *reg, u16 val)
+{
+ writew(val, reg + (long)reg - (long)priv->regs);
+}
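
The 32-bit-aligned accessors rely on `reg + ((long)reg - (long)priv->regs)` equalling base + 2*offset: `reg` is the address the 16-bit-packed struct layout yields (base + offset), and adding the offset once more lands on the doubled offset that a 32-bit-aligned bus uses. A worked example with hypothetical addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t base = 0x1000;        /* hypothetical priv->regs mapping */
        uintptr_t reg  = base + 0x06;   /* 16-bit-packed offset 6 */

        /* each 16-bit register occupies 4 bytes on a 32-bit-aligned
         * bus, so the real offset is 2*6 = 0xc: prints 0x100c
         */
        printf("0x%lx\n", (unsigned long)(reg + (reg - base)));
        return 0;
    }
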
+
+static int __devinit c_can_plat_probe(struct platform_device *pdev)
+{
+ int ret;
+ void __iomem *addr;
+ struct net_device *dev;
+ struct c_can_priv *priv;
+ struct resource *mem, *irq;
+#ifdef CONFIG_HAVE_CLK
+ struct clk *clk;
+
+ /* get the appropriate clk */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "no clock defined\n");
+ ret = -ENODEV;
+ goto exit;
+ }
+#endif
+
+ /* get the platform data */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mem || !irq) {
+ ret = -ENODEV;
+ goto exit_free_clk;
+ }
+
+ if (!request_mem_region(mem->start, resource_size(mem),
+ KBUILD_MODNAME)) {
+ dev_err(&pdev->dev, "resource unavailable\n");
+ ret = -ENODEV;
+ goto exit_free_clk;
+ }
+
+ addr = ioremap(mem->start, resource_size(mem));
+ if (!addr) {
+ dev_err(&pdev->dev, "failed to map can port\n");
+ ret = -ENOMEM;
+ goto exit_release_mem;
+ }
+
+ /* allocate the c_can device */
+ dev = alloc_c_can_dev();
+ if (!dev) {
+ ret = -ENOMEM;
+ goto exit_iounmap;
+ }
+
+ priv = netdev_priv(dev);
+
+ dev->irq = irq->start;
+ priv->regs = addr;
+#ifdef CONFIG_HAVE_CLK
+ priv->can.clock.freq = clk_get_rate(clk);
+ priv->priv = clk;
+#endif
+
+ switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ default:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ break;
+ }
+
+ platform_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ ret = register_c_can_dev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+ KBUILD_MODNAME, ret);
+ goto exit_free_device;
+ }
+
+ dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+ KBUILD_MODNAME, priv->regs, dev->irq);
+ return 0;
+
+exit_free_device:
+ platform_set_drvdata(pdev, NULL);
+ free_c_can_dev(dev);
+exit_iounmap:
+ iounmap(addr);
+exit_release_mem:
+ release_mem_region(mem->start, resource_size(mem));
+exit_free_clk:
+#ifdef CONFIG_HAVE_CLK
+ clk_put(clk);
+exit:
+#endif
+ dev_err(&pdev->dev, "probe failed\n");
+
+ return ret;
+}
+
+static int __devexit c_can_plat_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct c_can_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ unregister_c_can_dev(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ free_c_can_dev(dev);
+ iounmap(priv->regs);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+#ifdef CONFIG_HAVE_CLK
+ clk_put(priv->priv);
+#endif
+
+ return 0;
+}
+
+static struct platform_driver c_can_plat_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = c_can_plat_probe,
+ .remove = __devexit_p(c_can_plat_remove),
+};
+
+static int __init c_can_plat_init(void)
+{
+ return platform_driver_register(&c_can_plat_driver);
+}
+module_init(c_can_plat_init);
+
+static void __exit c_can_plat_exit(void)
+{
+ platform_driver_unregister(&c_can_plat_driver);
+}
+module_exit(c_can_plat_exit);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 7ff170c..2d2d28f 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
-static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+ return rcu_dereference_protected(cnic_ulp_tbl[type],
+ lockdep_is_held(&cnic_lock));
+}
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
return -EINVAL;
}
mutex_lock(&cnic_lock);
- if (cnic_ulp_tbl[ulp_type]) {
+ if (cnic_ulp_tbl_prot(ulp_type)) {
pr_err("%s: Type %d has already been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type)
return -EINVAL;
}
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[ulp_type];
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
if (!ulp_ops) {
pr_err("%s: Type %d has not been registered\n",
__func__, ulp_type);
@@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
return -EINVAL;
}
mutex_lock(&cnic_lock);
- if (cnic_ulp_tbl[ulp_type] == NULL) {
+ if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
pr_err("%s: Driver with type %d has not been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
cp->ulp_handle[ulp_type] = ulp_ctx;
- ulp_ops = cnic_ulp_tbl[ulp_type];
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
cnic_hold(dev);
@@ -2953,7 +2960,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cp->ulp_ops[if_type];
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
if (!ulp_ops) {
mutex_unlock(&cnic_lock);
continue;
@@ -2977,7 +2985,8 @@ static void cnic_ulp_start(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cp->ulp_ops[if_type];
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
if (!ulp_ops || !ulp_ops->cnic_start) {
mutex_unlock(&cnic_lock);
continue;
@@ -3041,7 +3050,7 @@ static void cnic_ulp_init(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[i];
+ ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_init) {
mutex_unlock(&cnic_lock);
continue;
@@ -3065,7 +3074,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
struct cnic_ulp_ops *ulp_ops;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl[i];
+ ulp_ops = cnic_ulp_tbl_prot(i);
if (!ulp_ops || !ulp_ops->cnic_exit) {
mutex_unlock(&cnic_lock);
continue;
@@ -4170,6 +4179,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev)
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
+static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
+{
+ u32 max_conn;
+
+ max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
+ dev->max_iscsi_conn = max_conn;
+}
+
static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -4494,6 +4511,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return err;
}
+ cnic_get_bnx2_iscsi_info(dev);
+
return 0;
}
@@ -4705,129 +4724,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
cp->rx_cons = *cp->rx_cons_ptr;
}
-static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
- u32 lower_addr)
-{
- u32 val;
- u8 mac[6];
-
- val = CNIC_RD(dev, upper_addr);
-
- mac[0] = (u8) (val >> 8);
- mac[1] = (u8) val;
-
- val = CNIC_RD(dev, lower_addr);
-
- mac[2] = (u8) (val >> 24);
- mac[3] = (u8) (val >> 16);
- mac[4] = (u8) (val >> 8);
- mac[5] = (u8) val;
-
- if (is_valid_ether_addr(mac)) {
- memcpy(dev->mac_addr, mac, 6);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
-{
- struct cnic_local *cp = dev->cnic_priv;
- u32 base, base2, addr, addr1, val;
- int port = CNIC_PORT(cp);
-
- dev->max_iscsi_conn = 0;
- base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
- if (base == 0)
- return;
-
- base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
- MISC_REG_GENERIC_CR_0));
- addr = BNX2X_SHMEM_ADDR(base,
- dev_info.port_hw_config[port].iscsi_mac_upper);
-
- addr1 = BNX2X_SHMEM_ADDR(base,
- dev_info.port_hw_config[port].iscsi_mac_lower);
-
- cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
-
- addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
- val = CNIC_RD(dev, addr);
-
- if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
- u16 val16;
-
- addr = BNX2X_SHMEM_ADDR(base,
- drv_lic_key[port].max_iscsi_init_conn);
- val16 = CNIC_RD16(dev, addr);
-
- if (val16)
- val16 ^= 0x1e1e;
- dev->max_iscsi_conn = val16;
- }
-
- if (BNX2X_CHIP_IS_E2(cp->chip_id))
- dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
-
- if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
- int func = CNIC_FUNC(cp);
- u32 mf_cfg_addr;
-
- if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
- mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
- mf_cfg_addr));
- else
- mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
-
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
- /* Must determine if the MF is SD vs SI mode */
- addr = BNX2X_SHMEM_ADDR(base,
- dev_info.shared_feature_config.config);
- val = CNIC_RD(dev, addr);
- if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
- SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
- int rc;
-
- /* MULTI_FUNCTION_SI mode */
- addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_ext_config[func].func_cfg);
- val = CNIC_RD(dev, addr);
- if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
- dev->max_iscsi_conn = 0;
-
- if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
- dev->max_fcoe_conn = 0;
-
- addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_ext_config[func].
- iscsi_mac_addr_upper);
- addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_ext_config[func].
- iscsi_mac_addr_lower);
- rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
- addr1);
- if (rc && func > 1)
- dev->max_iscsi_conn = 0;
-
- return;
- }
- }
-
- addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
- func_mf_config[func].e1hov_tag);
-
- val = CNIC_RD(dev, addr);
- val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
- if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
- dev->max_fcoe_conn = 0;
- dev->max_iscsi_conn = 0;
- }
- }
- if (!is_valid_ether_addr(dev->mac_addr))
- dev->max_iscsi_conn = 0;
-}
-
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
@@ -4909,8 +4805,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
cnic_init_bnx2x_kcq(dev);
- cnic_get_bnx2x_iscsi_info(dev);
-
/* Only 1 EQ */
CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5343,6 +5237,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
+ if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+ cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+ if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+ !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+ cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+
+ memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+
cp->cnic_ops = &cnic_bnx2x_ops;
cp->start_hw = cnic_start_bnx2x_hw;
cp->stop_hw = cnic_stop_bnx2x_hw;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index b328f6c..4456260 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -220,7 +220,7 @@ struct cnic_local {
#define ULP_F_INIT 0
#define ULP_F_START 1
#define ULP_F_CALL_PENDING 2
- struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+ struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
unsigned long cnic_local_flags;
#define CNIC_LCL_FL_KWQ_INIT 0x0
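
The __rcu annotation lets sparse verify that lockless readers use rcu_dereference() while updaters go through rcu_assign_pointer() or a _protected accessor. A sketch of the reader side this implies, assuming the ulp_ops and ulp_handle arrays of struct cnic_local:

struct cnic_ulp_ops *ulp_ops;

rcu_read_lock();
ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
if (ulp_ops)
	ulp_ops->cnic_start(cp->ulp_handle[CNIC_ULP_ISCSI]);
rcu_read_unlock();
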
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 9f44e0f..e01b49e 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.2.12"
-#define CNIC_MODULE_RELDATE "Jan 03, 2011"
+#define CNIC_MODULE_VERSION "2.2.13"
+#define CNIC_MODULE_RELDATE "Jan 31, 2011"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -159,6 +159,9 @@ struct cnic_eth_dev {
u32 drv_state;
#define CNIC_DRV_STATE_REGD 0x00000001
#define CNIC_DRV_STATE_USING_MSIX 0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
+#define CNIC_DRV_STATE_NO_FCOE 0x00000010
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
@@ -176,6 +179,7 @@ struct cnic_eth_dev {
u32 fcoe_init_cid;
u16 iscsi_l2_client_id;
u16 iscsi_l2_cid;
+ u8 iscsi_mac[ETH_ALEN];
int num_irq;
struct cnic_irq irq_arr[MAX_CNIC_VEC];
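
A hypothetical producer-side sketch (the matching bnx2x change is not part of this diff): the ethernet driver is now expected to fill in these fields before init_bnx2x_cnic() copies them, as shown above. All names on the right-hand side are illustrative only:

struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

if (!iscsi_licensed)				/* assumption */
	cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
cp->max_iscsi_conn = max_iscsi_conn;		/* assumption */
memcpy(cp->iscsi_mac, bp->iscsi_mac, ETH_ALEN);	/* assumption */
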
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ef02aa6..862804f 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
dev = NULL;
if (grp)
dev = vlan_group_get_device(grp, vlan);
- } else
+ } else if (netif_is_bond_slave(dev)) {
while (dev->master)
dev = dev->master;
+ }
return dev;
}
}
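
netif_is_bond_slave() is a small flag test from <linux/netdevice.h>, roughly (paraphrased; details may vary by kernel version):

static inline bool netif_is_bond_slave(struct net_device *dev)
{
	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}

so the dev->master walk above now runs only for interfaces actually enslaved to a bond, instead of for every non-VLAN device.
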
@@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
cxgb_neigh_update((struct neighbour *)ctx);
break;
}
- case (NETEVENT_PMTU_UPDATE):
- break;
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index ec35d45..5352c8a 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event,
case NETEVENT_NEIGH_UPDATE:
check_neigh_update(data);
break;
- case NETEVENT_PMTU_UPDATE:
case NETEVENT_REDIRECT:
default:
break;
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index e610e136..00bf595 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -364,6 +364,7 @@ struct e1000_adapter {
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
+ spinlock_t stats64_lock;
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
-extern void e1000e_update_stats(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
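
The new stats64_lock above serializes all callers of e1000e_update_stats(); every site added later in this patch takes it the same way:

spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
spin_unlock(&adapter->stats64_lock);
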
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index fa08b63..65ef9b5 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -46,15 +46,15 @@ struct e1000_stats {
};
#define E1000_STAT(str, m) { \
- .stat_string = str, \
- .type = E1000_STATS, \
- .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
- .stat_offset = offsetof(struct e1000_adapter, m) }
+ .stat_string = str, \
+ .type = E1000_STATS, \
+ .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+ .stat_offset = offsetof(struct e1000_adapter, m) }
#define E1000_NETDEV_STAT(str, m) { \
- .stat_string = str, \
- .type = NETDEV_STATS, \
- .sizeof_stat = sizeof(((struct net_device *)0)->m), \
- .stat_offset = offsetof(struct net_device, m) }
+ .stat_string = str, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("tx_broadcast", stats.bptc),
E1000_STAT("rx_multicast", stats.mprc),
E1000_STAT("tx_multicast", stats.mptc),
- E1000_NETDEV_STAT("rx_errors", stats.rx_errors),
- E1000_NETDEV_STAT("tx_errors", stats.tx_errors),
- E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped),
+ E1000_NETDEV_STAT("rx_errors", rx_errors),
+ E1000_NETDEV_STAT("tx_errors", tx_errors),
+ E1000_NETDEV_STAT("tx_dropped", tx_dropped),
E1000_STAT("multicast", stats.mprc),
E1000_STAT("collisions", stats.colc),
- E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors),
- E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors),
+ E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+ E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
E1000_STAT("rx_crc_errors", stats.crcerrs),
- E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors),
+ E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
E1000_STAT("rx_no_buffer_count", stats.rnbc),
E1000_STAT("rx_missed_errors", stats.mpc),
E1000_STAT("tx_aborted_errors", stats.ecol),
E1000_STAT("tx_carrier_errors", stats.tncrs),
- E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors),
- E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors),
+ E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+ E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
E1000_STAT("tx_window_errors", stats.latecol),
E1000_STAT("tx_abort_late_coll", stats.latecol),
E1000_STAT("tx_deferred_ok", stats.dc),
@@ -684,20 +684,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!tx_ring)
goto err_alloc_tx;
- /*
- * use a memcpy to save any previously configured
- * items like napi structs from having to be
- * reinitialized
- */
- memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
- rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+ rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
if (!rx_ring)
goto err_alloc_rx;
- memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
adapter->tx_ring = tx_ring;
adapter->rx_ring = rx_ring;
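
kmemdup() from <linux/string.h> bundles the allocate-and-copy that the deleted lines did by hand, so previously configured fields such as the napi structs are still carried over. Its behaviour is equivalent to:

void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}
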
@@ -1255,7 +1248,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl_reg = 0;
- u32 stat_reg = 0;
u16 phy_reg = 0;
s32 ret_val = 0;
@@ -1363,8 +1355,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
* Set the ILOS bit on the fiber Nic if half duplex link is
* detected.
*/
- stat_reg = er32(STATUS);
- if ((stat_reg & E1000_STATUS_FD) == 0)
+ if ((er32(STATUS) & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
}
@@ -1972,8 +1963,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
static int e1000_nway_reset(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev))
- e1000e_reinit_locked(adapter);
+
+ if (!netif_running(netdev))
+ return -EAGAIN;
+
+ if (!adapter->hw.mac.autoneg)
+ return -EINVAL;
+
+ e1000e_reinit_locked(adapter);
+
return 0;
}
@@ -1982,14 +1980,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 net_stats;
int i;
char *p = NULL;
- e1000e_update_stats(adapter);
+ e1000e_get_stats64(netdev, &net_stats);
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
- p = (char *) netdev +
+ p = (char *) &net_stats +
e1000_gstrings_stats[i].stat_offset;
break;
case E1000_STATS:
@@ -2014,7 +2013,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
+ memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
break;
case ETH_SS_STATS:
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index fb46974..232b42b 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -2104,7 +2104,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
union ich8_hws_flash_status hsfsts;
s32 ret_val = -E1000_ERR_NVM;
- s32 i = 0;
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
@@ -2140,6 +2139,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
ret_val = 0;
} else {
+ s32 i = 0;
+
/*
* Otherwise poll for some time so the current
* cycle has a chance to end before giving up.
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 68aa174..96921de5 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
- u16 timeout = 0;
u8 spi_stat_reg;
if (nvm->type == e1000_nvm_eeprom_spi) {
+ u16 timeout = NVM_MAX_RETRY_SPI;
+
/* Clear SK and CS */
eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
ew32(EECD, eecd);
udelay(1);
- timeout = NVM_MAX_RETRY_SPI;
/*
* Read "Status Register" repeatedly until the LSB is cleared.
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3fa110d..ec0b803 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -900,8 +900,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1060,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
- netdev->stats.tx_bytes += total_tx_bytes;
- netdev->stats.tx_packets += total_tx_packets;
return count < tx_ring->count;
}
@@ -1248,8 +1244,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1429,8 +1423,6 @@ next_desc:
adapter->total_rx_bytes += total_rx_bytes;
adapter->total_rx_packets += total_rx_packets;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
@@ -1857,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
int err = 0, vector = 0;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
- sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
+ snprintf(adapter->rx_ring->name,
+ sizeof(adapter->rx_ring->name) - 1,
+ "%s-rx-0", netdev->name);
else
memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -1870,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
vector++;
if (strlen(netdev->name) < (IFNAMSIZ - 5))
- sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
+ snprintf(adapter->tx_ring->name,
+ sizeof(adapter->tx_ring->name) - 1,
+ "%s-tx-0", netdev->name);
else
memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
err = request_irq(adapter->msix_entries[vector].vector,
@@ -2734,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 rctl, rfctl;
- u32 psrctl = 0;
u32 pages = 0;
/* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2833,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
adapter->rx_ps_pages = 0;
if (adapter->rx_ps_pages) {
+ u32 psrctl = 0;
+
/* Configure extra packet-split registers */
rfctl = er32(RFCTL);
rfctl |= E1000_RFCTL_EXTEN;
@@ -3034,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev)
struct netdev_hw_addr *ha;
u8 *mta_list;
u32 rctl;
- int i;
/* Check for Promiscuous and All Multicast modes */
@@ -3057,12 +3053,13 @@ static void e1000_set_multi(struct net_device *netdev)
ew32(RCTL, rctl);
if (!netdev_mc_empty(netdev)) {
+ int i = 0;
+
mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return;
/* prepare a packed array of only addresses. */
- i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
@@ -3359,6 +3356,8 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
e1e_flush();
}
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
void e1000e_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3393,6 +3392,11 @@ void e1000e_down(struct e1000_adapter *adapter)
del_timer_sync(&adapter->phy_info_timer);
netif_carrier_off(netdev);
+
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
+
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -3437,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ spin_lock_init(&adapter->stats64_lock);
+
e1000e_set_interrupt_capability(adapter);
if (e1000_alloc_queues(adapter))
@@ -3918,7 +3924,7 @@ release:
* e1000e_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-void e1000e_update_stats(struct e1000_adapter *adapter)
+static void e1000e_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
@@ -4030,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_phy_regs *phy = &adapter->phy_regs;
- int ret_val;
if ((er32(STATUS) & E1000_STATUS_LU) &&
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
+ int ret_val;
+
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4179,7 +4186,6 @@ static void e1000_watchdog_task(struct work_struct *work)
struct e1000_ring *tx_ring = adapter->tx_ring;
struct e1000_hw *hw = &adapter->hw;
u32 link, tctl;
- int tx_pending = 0;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -4320,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work)
}
link_up:
+ spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
+ spin_unlock(&adapter->stats64_lock);
mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
adapter->tpt_old = adapter->stats.tpt;
@@ -4334,20 +4342,17 @@ link_up:
e1000e_update_adaptive(&adapter->hw);
- if (!netif_carrier_ok(netdev)) {
- tx_pending = (e1000_desc_unused(tx_ring) + 1 <
- tx_ring->count);
- if (tx_pending) {
- /*
- * We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
- schedule_work(&adapter->reset_task);
- /* return immediately since reset is imminent */
- return;
- }
+ if (!netif_carrier_ok(netdev) &&
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+ /*
+ * We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ schedule_work(&adapter->reset_task);
+ /* return immediately since reset is imminent */
+ return;
}
/* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4411,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter,
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
- int err;
if (!skb_is_gso(skb))
return 0;
if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
if (err)
return err;
}
@@ -4928,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work)
}
/**
- * e1000_get_stats - Get System Network Statistics
+ * e1000_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
*
* Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
**/
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
{
- /* only return the current stats */
- return &netdev->stats;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ memset(stats, 0, sizeof(struct rtnl_link_stats64));
+ spin_lock(&adapter->stats64_lock);
+ e1000e_update_stats(adapter);
+ /* Fill out the OS statistics structure */
+ stats->rx_bytes = adapter->stats.gorc;
+ stats->rx_packets = adapter->stats.gprc;
+ stats->tx_bytes = adapter->stats.gotc;
+ stats->tx_packets = adapter->stats.gptc;
+ stats->multicast = adapter->stats.mprc;
+ stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /*
+ * RLEC on some newer hardware can be incorrect, so build
+ * our own version based on RUC and ROC
+ */
+ stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ stats->rx_crc_errors = adapter->stats.crcerrs;
+ stats->rx_frame_errors = adapter->stats.algnerrc;
+ stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ stats->tx_aborted_errors = adapter->stats.ecol;
+ stats->tx_window_errors = adapter->stats.latecol;
+ stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ spin_unlock(&adapter->stats64_lock);
+ return stats;
}
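
For reference, the ndo_get_stats64 contract implemented here: the core passes caller-owned storage and consumes the returned pointer. A paraphrase of dev_get_stats() from this era (the helper name below is hypothetical):

struct rtnl_link_stats64 *get_link_stats(struct net_device *dev,
					 struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64)
		return ops->ndo_get_stats64(dev, storage);
	netdev_stats_to_stats64(storage, &dev->stats);
	return storage;
}
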
/**
@@ -5507,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
- int vector, msix_irq;
if (adapter->msix_entries) {
+ int vector, msix_irq;
+
vector = 0;
msix_irq = adapter->msix_entries[vector].vector;
disable_irq(msix_irq);
@@ -5706,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_open = e1000_open,
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
- .ndo_get_stats = e1000_get_stats,
+ .ndo_get_stats64 = e1000e_get_stats64,
.ndo_set_multicast_list = e1000_set_multi,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6bea051..6ae31fc 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
{
s32 ret_val;
- u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
- u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@ out:
s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
{
s32 ret_val;
- u32 page_select = 0;
u32 page = offset >> IGP_PAGE_SHIFT;
- u32 page_shift = 0;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
if (offset > MAX_PHY_MULTI_PAGE_REG) {
+ u32 page_shift, page_select;
+
/*
* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index e7b6c31..2e573be 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
+ enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a937f49..aee5256 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,13 +32,13 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "1.4.1.10"
-#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
+#define DRV_VERSION "2.1.1.9"
+#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 8
-#define ENIC_RQ_MAX 8
+#define ENIC_WQ_MAX 1
+#define ENIC_RQ_MAX 1
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
@@ -49,7 +49,7 @@ struct enic_msix_entry {
void *devid;
};
-#define ENIC_SET_APPLIED (1 << 0)
+#define ENIC_PORT_REQUEST_APPLIED (1 << 0)
#define ENIC_SET_REQUEST (1 << 1)
#define ENIC_SET_NAME (1 << 2)
#define ENIC_SET_INSTANCE (1 << 3)
@@ -101,7 +101,6 @@ struct enic {
/* receive queue cache line section */
____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
unsigned int rq_count;
- int (*rq_alloc_buf)(struct vnic_rq *rq);
u64 rq_truncated_pkts;
u64 rq_bad_fcs;
struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644
index 0000000..37ad3a1
--- /dev/null
+++ b/drivers/net/enic/enic_dev.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_fw_info(enic->vdev, fw_info);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_stats_dump(enic->vdev, vstats);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_add_station_addr(struct enic *enic)
+{
+ int err;
+
+ if (!is_valid_ether_addr(enic->netdev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_del_station_addr(struct enic *enic)
+{
+ int err;
+
+ if (!is_valid_ether_addr(enic->netdev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+ int broadcast, int promisc, int allmulti)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_packet_filter(enic->vdev, directed,
+ multicast, broadcast, promisc, allmulti);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_add_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_add_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_del_addr(struct enic *enic, u8 *addr)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_del_addr(enic->vdev, addr);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_notify_unset(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_notify_unset(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_hang_notify(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_hang_notify(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_enable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_enable_wait(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_disable(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_disable(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_vnic_dev_deinit(struct enic *enic)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_deinit(enic->vdev);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_prov(enic->vdev,
+ (u8 *)vp, vic_provinfo_size(vp));
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_init_done(struct enic *enic, int *done, int *error)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_init_done(enic->vdev, done, error);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ spin_lock(&enic->devcmd_lock);
+ enic_add_vlan(enic, vid);
+ spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ spin_lock(&enic->devcmd_lock);
+ enic_del_vlan(enic, vid);
+ spin_unlock(&enic->devcmd_lock);
+}
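
Usage example: callers in enic_main.c now go through these wrappers instead of open-coding the devcmd lock; e.g. the drvinfo path looks roughly like:

struct vnic_devcmd_fw_info *fw_info;
int err;

err = enic_dev_fw_info(enic, &fw_info);
if (!err)
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
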
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644
index 0000000..495f57f
--- /dev/null
+++ b/drivers/net/enic/enic_dev.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_DEV_H_
+#define _ENIC_DEV_H_
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
+int enic_dev_add_station_addr(struct enic *enic);
+int enic_dev_del_station_addr(struct enic *enic);
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+ int broadcast, int promisc, int allmulti);
+int enic_dev_add_addr(struct enic *enic, u8 *addr);
+int enic_dev_del_addr(struct enic *enic, u8 *addr);
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_dev_notify_unset(struct enic *enic);
+int enic_dev_hang_notify(struct enic *enic);
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
+int enic_dev_enable(struct enic *enic);
+int enic_dev_disable(struct enic *enic);
+int enic_vnic_dev_deinit(struct enic *enic);
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
+int enic_dev_init_done(struct enic *enic, int *done, int *error);
+
+#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a0af48c..4f1710e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -44,6 +44,7 @@
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
+#include "enic_dev.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev,
return 0;
}
-static int enic_dev_fw_info(struct enic *enic,
- struct vnic_devcmd_fw_info **fw_info)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_fw_info(enic->vdev, fw_info);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
@@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset)
}
}
-static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_stats_dump(enic->vdev, vstats);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
@@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
return net_stats;
}
-static void enic_reset_multicast_list(struct enic *enic)
+static void enic_reset_addr_lists(struct enic *enic)
{
enic->mc_count = 0;
+ enic->uc_count = 0;
enic->flags = 0;
}
@@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
return 0;
}
-static int enic_dev_add_station_addr(struct enic *enic)
-{
- int err = 0;
-
- if (is_valid_ether_addr(enic->netdev->dev_addr)) {
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
- spin_unlock(&enic->devcmd_lock);
- }
-
- return err;
-}
-
-static int enic_dev_del_station_addr(struct enic *enic)
-{
- int err = 0;
-
- if (is_valid_ether_addr(enic->netdev->dev_addr)) {
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
- spin_unlock(&enic->devcmd_lock);
- }
-
- return err;
-}
-
static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
struct enic *enic = netdev_priv(netdev);
@@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
return enic_dev_add_station_addr(enic);
}
-static int enic_dev_packet_filter(struct enic *enic, int directed,
- int multicast, int broadcast, int promisc, int allmulti)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_packet_filter(enic->vdev, directed,
- multicast, broadcast, promisc, allmulti);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_add_addr(struct enic *enic, u8 *addr)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_add_addr(enic->vdev, addr);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_del_addr(struct enic *enic, u8 *addr)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_del_addr(enic->vdev, addr);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static void enic_add_multicast_addr_list(struct enic *enic)
+static void enic_update_multicast_addr_list(struct enic *enic)
{
struct net_device *netdev = enic->netdev;
struct netdev_hw_addr *ha;
@@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic)
enic->mc_count = mc_count;
}
-static void enic_add_unicast_addr_list(struct enic *enic)
+static void enic_update_unicast_addr_list(struct enic *enic)
{
struct net_device *netdev = enic->netdev;
struct netdev_hw_addr *ha;
@@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
if (!promisc) {
- enic_add_unicast_addr_list(enic);
+ enic_update_unicast_addr_list(enic);
if (!allmulti)
- enic_add_multicast_addr_list(enic);
+ enic_update_multicast_addr_list(enic);
}
}
@@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev,
enic->vlan_group = vlan_group;
}
-/* rtnl lock is held */
-static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
- struct enic *enic = netdev_priv(netdev);
-
- spin_lock(&enic->devcmd_lock);
- enic_add_vlan(enic, vid);
- spin_unlock(&enic->devcmd_lock);
-}
-
-/* rtnl lock is held */
-static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-{
- struct enic *enic = netdev_priv(netdev);
-
- spin_lock(&enic->devcmd_lock);
- enic_del_vlan(enic, vid);
- spin_unlock(&enic->devcmd_lock);
-}
-
/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
@@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev)
schedule_work(&enic->reset);
}
-static int enic_vnic_dev_deinit(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_deinit(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_init_prov(enic->vdev,
- (u8 *)vp, vic_provinfo_size(vp));
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_init_done(struct enic *enic, int *done, int *error)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_init_done(enic->vdev, done, error);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct enic *enic = netdev_priv(netdev);
@@ -1318,18 +1182,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
vic_provinfo_free(vp);
if (err)
return err;
-
- enic->pp.set |= ENIC_SET_APPLIED;
break;
case PORT_REQUEST_DISASSOCIATE:
- enic->pp.set &= ~ENIC_SET_APPLIED;
break;
default:
return -EINVAL;
}
+ /* Set flag to indicate that the port assoc/disassoc
+ * request has been sent out to the firmware
+ */
+ enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
+
return 0;
}
@@ -1379,9 +1245,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (is_zero_ether_addr(netdev->dev_addr))
random_ether_addr(netdev->dev_addr);
- } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
- if (!is_zero_ether_addr(enic->pp.mac_addr))
- enic_dev_del_addr(enic, enic->pp.mac_addr);
}
memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@@ -1390,9 +1253,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (err)
goto set_port_profile_cleanup;
- if (!is_zero_ether_addr(enic->pp.mac_addr))
- enic_dev_add_addr(enic, enic->pp.mac_addr);
-
set_port_profile_cleanup:
memset(enic->pp.vf_mac, 0, ETH_ALEN);
@@ -1411,7 +1271,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
int err, error, done;
u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
- if (!(enic->pp.set & ENIC_SET_APPLIED))
+ if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
return -ENODATA;
err = enic_dev_init_done(enic, &done, &error);
@@ -1489,62 +1349,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
-static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
-{
- struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-
- if (vnic_rq_posting_soon(rq)) {
-
- /* SW workaround for A0 HW erratum: if we're just about
- * to write posted_index, insert a dummy desc
- * of type resvd
- */
-
- rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
- vnic_rq_post(rq, 0, 0, 0, 0);
- } else {
- return enic_rq_alloc_buf(rq);
- }
-
- return 0;
-}
-
-static int enic_dev_hw_version(struct enic *enic,
- enum vnic_dev_hw_version *hw_ver)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_hw_version(enic->vdev, hw_ver);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_set_rq_alloc_buf(struct enic *enic)
-{
- enum vnic_dev_hw_version hw_ver;
- int err;
-
- err = enic_dev_hw_version(enic, &hw_ver);
- if (err)
- return err;
-
- switch (hw_ver) {
- case VNIC_DEV_HW_VER_A1:
- enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
- break;
- case VNIC_DEV_HW_VER_A2:
- case VNIC_DEV_HW_VER_UNKNOWN:
- enic->rq_alloc_buf = enic_rq_alloc_buf;
- break;
- default:
- return -ENODEV;
- }
-
- return 0;
-}
-
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@@ -1681,7 +1485,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling
* mode so we can try to fill the ring again.
@@ -1731,7 +1535,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf);
+ err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
/* Buffer allocation failed. Stay in polling mode
* so we can try to fill the ring again.
@@ -1901,39 +1705,6 @@ static int enic_dev_notify_set(struct enic *enic)
return err;
}
-static int enic_dev_notify_unset(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_notify_unset(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_enable(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_enable_wait(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_disable(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_disable(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_notify_timer_start(struct enic *enic)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1967,7 +1738,7 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
- vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+ vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -2285,29 +2056,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
rss_hash_bits, rss_base_cpu, rss_enable);
}
-static int enic_dev_hang_notify(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_hang_notify(enic->vdev);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
-static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
-{
- int err;
-
- spin_lock(&enic->devcmd_lock);
- err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
- IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
- spin_unlock(&enic->devcmd_lock);
-
- return err;
-}
-
static void enic_reset(struct work_struct *work)
{
struct enic *enic = container_of(work, struct enic, reset);
@@ -2320,7 +2068,7 @@ static void enic_reset(struct work_struct *work)
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
- enic_reset_multicast_list(enic);
+ enic_reset_addr_lists(enic);
enic_init_vnic_resources(enic);
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2332,7 +2080,7 @@ static void enic_reset(struct work_struct *work)
static int enic_set_intr_mode(struct enic *enic)
{
unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
- unsigned int m = 1;
+ unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
unsigned int i;
/* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2475,9 +2223,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
.ndo_tx_timeout = enic_tx_timeout,
.ndo_set_vf_port = enic_set_vf_port,
.ndo_get_vf_port = enic_get_vf_port,
-#ifdef IFLA_VF_MAX
.ndo_set_vf_mac = enic_set_vf_mac,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = enic_poll_controller,
#endif
@@ -2556,25 +2302,12 @@ static int enic_dev_init(struct enic *enic)
enic_init_vnic_resources(enic);
- err = enic_set_rq_alloc_buf(enic);
- if (err) {
- dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
- goto err_out_free_vnic_resources;
- }
-
err = enic_set_rss_nic_cfg(enic);
if (err) {
dev_err(dev, "Failed to config nic, aborting\n");
goto err_out_free_vnic_resources;
}
- err = enic_dev_set_ig_vlan_rewrite_mode(enic);
- if (err) {
- dev_err(dev,
- "Failed to set ingress vlan rewrite mode, aborting.\n");
- goto err_out_free_vnic_resources;
- }
-
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2713,6 +2446,22 @@ static int __devinit enic_probe(struct pci_dev *pdev,
goto err_out_vnic_unregister;
}
+ /* Set up the devcmd lock
+ */
+
+ spin_lock_init(&enic->devcmd_lock);
+
+ /*
+ * Set ingress vlan rewrite mode before vnic initialization
+ */
+
+ err = enic_dev_set_ig_vlan_rewrite_mode(enic);
+ if (err) {
+ dev_err(dev,
+ "Failed to set ingress vlan rewrite mode, aborting.\n");
+ goto err_out_dev_close;
+ }
+
/* Issue device init to initialize the vnic-to-switch link.
* We'll start with carrier off and wait for link UP
* notification later to turn on carrier. We don't need
@@ -2736,11 +2485,6 @@ static int __devinit enic_probe(struct pci_dev *pdev,
}
}
- /* Setup devcmd lock
- */
-
- spin_lock_init(&enic->devcmd_lock);
-
err = enic_dev_init(enic);
if (err) {
dev_err(dev, "Device initialization failed, aborting\n");
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index fb35d8b..c489e72 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -419,25 +419,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
return err;
}
-int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
-{
- struct vnic_devcmd_fw_info *fw_info;
- int err;
-
- err = vnic_dev_fw_info(vdev, &fw_info);
- if (err)
- return err;
-
- if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
- *hw_ver = VNIC_DEV_HW_VER_A1;
- else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
- *hw_ver = VNIC_DEV_HW_VER_A2;
- else
- *hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
-
- return 0;
-}
-
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value)
{
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 05f9a24..e837546 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg)
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-enum vnic_dev_hw_version {
- VNIC_DEV_HW_VER_UNKNOWN,
- VNIC_DEV_HW_VER_A1,
- VNIC_DEV_HW_VER_A2,
-};
-
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
-int vnic_dev_hw_version(struct vnic_dev *vdev,
- enum vnic_dev_hw_version *hw_ver);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value);
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 37f08de..2056586 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
}
}
-static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
-{
- return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
-}
-
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
rq->ring.desc_avail += count;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2a71373..74798be 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -54,7 +54,7 @@
#include "fec.h"
-#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+#if defined(CONFIG_ARM)
#define FEC_ALIGNMENT 0xf
#else
#define FEC_ALIGNMENT 0x3
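
FEC_ALIGNMENT is used as an address mask, so 0xf requires 16-byte DMA alignment on ARM (i.MX) while ColdFire parts need only 4 bytes. The transmit path later in this file tests it exactly that way:

if (((unsigned long)bufaddr) & FEC_ALIGNMENT) {
	/* buffer not aligned enough for this FEC's DMA: bounce it */
	unsigned int index = bdp - fep->tx_bd_base;

	memcpy(fep->tx_bounce[index], skb->data, skb->len);
	bufaddr = fep->tx_bounce[index];
}
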
@@ -147,8 +147,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
- defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -183,7 +182,7 @@ struct fec_enet_private {
struct bufdesc *rx_bd_base;
struct bufdesc *tx_bd_base;
/* The next free ring entry */
- struct bufdesc *cur_rx, *cur_tx;
+ struct bufdesc *cur_rx, *cur_tx;
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
@@ -191,28 +190,21 @@ struct fec_enet_private {
/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
spinlock_t hw_lock;
- struct platform_device *pdev;
+ struct platform_device *pdev;
int opened;
/* Phylib and MDIO interface */
- struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
- int mii_timeout;
- uint phy_speed;
+ struct mii_bus *mii_bus;
+ struct phy_device *phy_dev;
+ int mii_timeout;
+ uint phy_speed;
phy_interface_t phy_interface;
int link;
int full_duplex;
struct completion mdio_done;
};
-static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
-static void fec_enet_tx(struct net_device *dev);
-static void fec_enet_rx(struct net_device *dev);
-static int fec_enet_close(struct net_device *dev);
-static void fec_restart(struct net_device *dev, int duplex);
-static void fec_stop(struct net_device *dev);
-
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST (1 << 30)
#define FEC_MMFR_OP_READ (2 << 28)
@@ -239,9 +231,9 @@ static void *swap_buffer(void *bufaddr, int len)
}
static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
@@ -262,9 +254,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (status & BD_ENET_TX_READY) {
/* Oops. All transmit buffers are full. Bail out.
- * This should not happen, since dev->tbusy should be set.
+ * This should not happen, since ndev->tbusy should be set.
*/
- printk("%s: tx queue full!.\n", dev->name);
+ printk("%s: tx queue full!.\n", ndev->name);
spin_unlock_irqrestore(&fep->hw_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -284,7 +276,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
index = bdp - fep->tx_bd_base;
- memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
+ memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
@@ -299,13 +291,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Save skb pointer */
fep->tx_skbuff[fep->skb_cur] = skb;
- dev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += skb->len;
fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
/* Push the data cache so the CPM does not get stale memory
* data.
*/
- bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
/* Send it on its way. Tell FEC it's ready, interrupt when done,
@@ -326,7 +318,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bdp == fep->dirty_tx) {
fep->tx_full = 1;
- netif_stop_queue(dev);
+ netif_stop_queue(ndev);
}
fep->cur_tx = bdp;
@@ -336,62 +328,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+/* This function is called to start or restart the FEC during a link
+ * change. This only happens when switching between half and full
+ * duplex.
+ */
static void
-fec_timeout(struct net_device *dev)
+fec_restart(struct net_device *ndev, int duplex)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int i;
+ u32 temp_mac[2];
+ u32 rcntl = OPT_FRAME_SIZE | 0x04;
- dev->stats.tx_errors++;
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
- fec_restart(dev, fep->full_duplex);
- netif_wake_queue(dev);
-}
+ /*
+ * an enet-mac reset clears the MAC address registers too,
+ * so they need to be reconfigured here.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+ writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+ writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+ }
-static irqreturn_t
-fec_enet_interrupt(int irq, void * dev_id)
-{
- struct net_device *dev = dev_id;
- struct fec_enet_private *fep = netdev_priv(dev);
- uint int_events;
- irqreturn_t ret = IRQ_NONE;
+ /* Clear any outstanding interrupt. */
+ writel(0xffc00000, fep->hwp + FEC_IEVENT);
- do {
- int_events = readl(fep->hwp + FEC_IEVENT);
- writel(int_events, fep->hwp + FEC_IEVENT);
+ /* Reset all multicast. */
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
- if (int_events & FEC_ENET_RXF) {
- ret = IRQ_HANDLED;
- fec_enet_rx(dev);
- }
+ /* Set maximum receive buffer size. */
+ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
- /* Transmit OK, or non-fatal error. Update the buffer
- * descriptors. FEC handles all errors, we just discover
- * them as part of the transmit process.
- */
- if (int_events & FEC_ENET_TXF) {
- ret = IRQ_HANDLED;
- fec_enet_tx(dev);
+ /* Set receive and transmit descriptor base. */
+ writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
+ fep->hwp + FEC_X_DES_START);
+
+ fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+ fep->cur_rx = fep->rx_bd_base;
+
+ /* Reset SKB transmit buffers. */
+ fep->skb_cur = fep->skb_dirty = 0;
+ for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+ if (fep->tx_skbuff[i]) {
+ dev_kfree_skb_any(fep->tx_skbuff[i]);
+ fep->tx_skbuff[i] = NULL;
}
+ }
- if (int_events & FEC_ENET_MII) {
- ret = IRQ_HANDLED;
- complete(&fep->mdio_done);
+ /* Enable MII mode */
+ if (duplex) {
+ /* FD enable */
+ writel(0x04, fep->hwp + FEC_X_CNTRL);
+ } else {
+ /* No Rcv on Xmit */
+ rcntl |= 0x02;
+ writel(0x0, fep->hwp + FEC_X_CNTRL);
+ }
+
+ fep->full_duplex = duplex;
+
+ /* Set MII speed */
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+ /*
+ * The phy interface and speed need to get configured
+ * differently on enet-mac.
+ */
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+ /* Enable flow control and length check */
+ rcntl |= 0x40000000 | 0x00000020;
+
+ /* MII or RMII */
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ rcntl |= (1 << 8);
+ else
+ rcntl &= ~(1 << 8);
+
+ /* 10M or 100M */
+ if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+ rcntl &= ~(1 << 9);
+ else
+ rcntl |= (1 << 9);
+
+ } else {
+#ifdef FEC_MIIGSK_ENR
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+ /* disable the gasket and wait */
+ writel(0, fep->hwp + FEC_MIIGSK_ENR);
+ while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+ udelay(1);
+
+ /*
+ * configure the gasket:
+ * RMII, 50 MHz, no loopback, no echo
+ */
+ writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+ /* re-enable the gasket */
+ writel(2, fep->hwp + FEC_MIIGSK_ENR);
}
- } while (int_events);
+#endif
+ }
+ writel(rcntl, fep->hwp + FEC_R_CNTRL);
- return ret;
+ /* And last, enable the transmit and receive processing */
+ writel(2, fep->hwp + FEC_ECNTRL);
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+
+ /* Enable interrupts we wish to service */
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+}
+
+static void
+fec_stop(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ /* We cannot expect a graceful transmit stop without link! */
+ if (fep->link) {
+ writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+ udelay(10);
+ if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+ printk("fec_stop : Graceful transmit stop did not complete !\n");
+ }
+
+ /* Whack a reset. We should wait for this. */
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
static void
-fec_enet_tx(struct net_device *dev)
+fec_timeout(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ ndev->stats.tx_errors++;
+
+ fec_restart(ndev, fep->full_duplex);
+ netif_wake_queue(ndev);
+}
+
+static void
+fec_enet_tx(struct net_device *ndev)
{
struct fec_enet_private *fep;
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
- fep = netdev_priv(dev);
+ fep = netdev_priv(ndev);
spin_lock(&fep->hw_lock);
bdp = fep->dirty_tx;
@@ -399,7 +499,8 @@ fec_enet_tx(struct net_device *dev)
if (bdp == fep->cur_tx && fep->tx_full == 0)
break;
- dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
skb = fep->tx_skbuff[fep->skb_dirty];
@@ -407,19 +508,19 @@ fec_enet_tx(struct net_device *dev)
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN |
BD_ENET_TX_CSL)) {
- dev->stats.tx_errors++;
+ ndev->stats.tx_errors++;
if (status & BD_ENET_TX_HB) /* No heartbeat */
- dev->stats.tx_heartbeat_errors++;
+ ndev->stats.tx_heartbeat_errors++;
if (status & BD_ENET_TX_LC) /* Late collision */
- dev->stats.tx_window_errors++;
+ ndev->stats.tx_window_errors++;
if (status & BD_ENET_TX_RL) /* Retrans limit */
- dev->stats.tx_aborted_errors++;
+ ndev->stats.tx_aborted_errors++;
if (status & BD_ENET_TX_UN) /* Underrun */
- dev->stats.tx_fifo_errors++;
+ ndev->stats.tx_fifo_errors++;
if (status & BD_ENET_TX_CSL) /* Carrier lost */
- dev->stats.tx_carrier_errors++;
+ ndev->stats.tx_carrier_errors++;
} else {
- dev->stats.tx_packets++;
+ ndev->stats.tx_packets++;
}
if (status & BD_ENET_TX_READY)
@@ -429,7 +530,7 @@ fec_enet_tx(struct net_device *dev)
* but we eventually sent the packet OK.
*/
if (status & BD_ENET_TX_DEF)
- dev->stats.collisions++;
+ ndev->stats.collisions++;
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
@@ -446,8 +547,8 @@ fec_enet_tx(struct net_device *dev)
*/
if (fep->tx_full) {
fep->tx_full = 0;
- if (netif_queue_stopped(dev))
- netif_wake_queue(dev);
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
}
}
fep->dirty_tx = bdp;
@@ -461,9 +562,9 @@ fec_enet_tx(struct net_device *dev)
* effectively tossing the packet.
*/
static void
-fec_enet_rx(struct net_device *dev)
+fec_enet_rx(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
struct bufdesc *bdp;
@@ -497,17 +598,17 @@ fec_enet_rx(struct net_device *dev)
/* Check for errors. */
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
BD_ENET_RX_CR | BD_ENET_RX_OV)) {
- dev->stats.rx_errors++;
+ ndev->stats.rx_errors++;
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
/* Frame too long or too short. */
- dev->stats.rx_length_errors++;
+ ndev->stats.rx_length_errors++;
}
if (status & BD_ENET_RX_NO) /* Frame alignment */
- dev->stats.rx_frame_errors++;
+ ndev->stats.rx_frame_errors++;
if (status & BD_ENET_RX_CR) /* CRC Error */
- dev->stats.rx_crc_errors++;
+ ndev->stats.rx_crc_errors++;
if (status & BD_ENET_RX_OV) /* FIFO overrun */
- dev->stats.rx_fifo_errors++;
+ ndev->stats.rx_fifo_errors++;
}
/* Report late collisions as a frame error.
@@ -515,19 +616,19 @@ fec_enet_rx(struct net_device *dev)
* have in the buffer. So, just drop this frame on the floor.
*/
if (status & BD_ENET_RX_CL) {
- dev->stats.rx_errors++;
- dev->stats.rx_frame_errors++;
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_frame_errors++;
goto rx_processing_done;
}
/* Process the incoming frame. */
- dev->stats.rx_packets++;
+ ndev->stats.rx_packets++;
pkt_len = bdp->cbd_datlen;
- dev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_bytes += pkt_len;
data = (__u8*)__va(bdp->cbd_bufaddr);
- dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(data, pkt_len);
@@ -541,18 +642,18 @@ fec_enet_rx(struct net_device *dev)
if (unlikely(!skb)) {
printk("%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
+ ndev->name);
+ ndev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, ndev);
netif_rx(skb);
}
- bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
- DMA_FROM_DEVICE);
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+ FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -577,10 +678,47 @@ rx_processing_done:
spin_unlock(&fep->hw_lock);
}
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ uint int_events;
+ irqreturn_t ret = IRQ_NONE;
+
+ do {
+ int_events = readl(fep->hwp + FEC_IEVENT);
+ writel(int_events, fep->hwp + FEC_IEVENT);
+
+ if (int_events & FEC_ENET_RXF) {
+ ret = IRQ_HANDLED;
+ fec_enet_rx(ndev);
+ }
+
+ /* Transmit OK, or non-fatal error. Update the buffer
+ * descriptors. The FEC handles all errors; we just discover
+ * them as part of the transmit process.
+ */
+ if (int_events & FEC_ENET_TXF) {
+ ret = IRQ_HANDLED;
+ fec_enet_tx(ndev);
+ }
+
+ if (int_events & FEC_ENET_MII) {
+ ret = IRQ_HANDLED;
+ complete(&fep->mdio_done);
+ }
+ } while (int_events);
+
+ return ret;
+}
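
The loop above depends on FEC_IEVENT being write-one-to-clear: writing back the bits just read acknowledges exactly those events, and re-reading at the top of the loop catches events raised while earlier ones were being serviced. The same pattern in miniature (register and event names here are placeholders, not fec.h definitions):

    u32 events;

    do {
    	events = readl(base + IEVENT);	/* snapshot pending events */
    	writel(events, base + IEVENT);	/* w1c: ack that snapshot only */
    	if (events & EVT_RX)
    		handle_rx();
    	if (events & EVT_TX)
    		handle_tx();
    } while (events);	/* anything new since the snapshot? */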
+
/* ------------------------------------------------------------------------- */
-static void __inline__ fec_get_mac(struct net_device *dev)
+static void __inline__ fec_get_mac(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
unsigned char *iap, tmpaddr[ETH_ALEN];
@@ -616,11 +754,11 @@ static void __inline__ fec_get_mac(struct net_device *dev)
iap = &tmpaddr[0];
}
- memcpy(dev->dev_addr, iap, ETH_ALEN);
+ memcpy(ndev->dev_addr, iap, ETH_ALEN);
/* Adjust MAC if using macaddr */
if (iap == macaddr)
- dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
}
/* ------------------------------------------------------------------------- */
@@ -628,9 +766,9 @@ static void __inline__ fec_get_mac(struct net_device *dev)
/*
* Phy section
*/
-static void fec_enet_adjust_link(struct net_device *dev)
+static void fec_enet_adjust_link(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phy_dev = fep->phy_dev;
unsigned long flags;
@@ -647,7 +785,7 @@ static void fec_enet_adjust_link(struct net_device *dev)
/* Duplex link change */
if (phy_dev->link) {
if (fep->full_duplex != phy_dev->duplex) {
- fec_restart(dev, phy_dev->duplex);
+ fec_restart(ndev, phy_dev->duplex);
status_change = 1;
}
}
@@ -656,9 +794,9 @@ static void fec_enet_adjust_link(struct net_device *dev)
if (phy_dev->link != fep->link) {
fep->link = phy_dev->link;
if (phy_dev->link)
- fec_restart(dev, phy_dev->duplex);
+ fec_restart(ndev, phy_dev->duplex);
else
- fec_stop(dev);
+ fec_stop(ndev);
status_change = 1;
}
@@ -727,9 +865,9 @@ static int fec_enet_mdio_reset(struct mii_bus *bus)
return 0;
}
-static int fec_enet_mii_probe(struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phy_dev = NULL;
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
@@ -754,16 +892,16 @@ static int fec_enet_mii_probe(struct net_device *dev)
if (phy_id >= PHY_MAX_ADDR) {
printk(KERN_INFO "%s: no PHY, assuming direct connection "
- "to switch\n", dev->name);
+ "to switch\n", ndev->name);
strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
phy_id = 0;
}
snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
- phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
+ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+ printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
return PTR_ERR(phy_dev);
}
@@ -776,7 +914,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
fep->full_duplex = 0;
printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+ "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
fep->phy_dev->irq);
@@ -786,8 +924,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
static int fec_enet_mii_init(struct platform_device *pdev)
{
static struct mii_bus *fec0_mii_bus;
- struct net_device *dev = platform_get_drvdata(pdev);
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
int err = -ENXIO, i;
@@ -845,7 +983,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
for (i = 0; i < PHY_MAX_ADDR; i++)
fep->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(dev, fep->mii_bus);
+ platform_set_drvdata(ndev, fep->mii_bus);
if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq;
@@ -873,10 +1011,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
mdiobus_free(fep->mii_bus);
}
-static int fec_enet_get_settings(struct net_device *dev,
+static int fec_enet_get_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phydev = fep->phy_dev;
if (!phydev)
@@ -885,10 +1023,10 @@ static int fec_enet_get_settings(struct net_device *dev,
return phy_ethtool_gset(phydev, cmd);
}
-static int fec_enet_set_settings(struct net_device *dev,
+static int fec_enet_set_settings(struct net_device *ndev,
struct ethtool_cmd *cmd)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phydev = fep->phy_dev;
if (!phydev)
@@ -897,14 +1035,14 @@ static int fec_enet_set_settings(struct net_device *dev,
return phy_ethtool_sset(phydev, cmd);
}
-static void fec_enet_get_drvinfo(struct net_device *dev,
+static void fec_enet_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
strcpy(info->driver, fep->pdev->dev.driver->name);
strcpy(info->version, "Revision: 1.0");
- strcpy(info->bus_info, dev_name(&dev->dev));
+ strcpy(info->bus_info, dev_name(&ndev->dev));
}
static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -914,12 +1052,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phydev = fep->phy_dev;
- if (!netif_running(dev))
+ if (!netif_running(ndev))
return -EINVAL;
if (!phydev)
@@ -928,9 +1066,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(phydev, rq, cmd);
}
-static void fec_enet_free_buffers(struct net_device *dev)
+static void fec_enet_free_buffers(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
int i;
struct sk_buff *skb;
struct bufdesc *bdp;
@@ -940,7 +1078,7 @@ static void fec_enet_free_buffers(struct net_device *dev)
skb = fep->rx_skbuff[i];
if (bdp->cbd_bufaddr)
- dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
@@ -952,9 +1090,9 @@ static void fec_enet_free_buffers(struct net_device *dev)
kfree(fep->tx_bounce[i]);
}
-static int fec_enet_alloc_buffers(struct net_device *dev)
+static int fec_enet_alloc_buffers(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
int i;
struct sk_buff *skb;
struct bufdesc *bdp;
@@ -963,12 +1101,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
if (!skb) {
- fec_enet_free_buffers(dev);
+ fec_enet_free_buffers(ndev);
return -ENOMEM;
}
fep->rx_skbuff[i] = skb;
- bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
bdp->cbd_sc = BD_ENET_RX_EMPTY;
bdp++;
@@ -995,45 +1133,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev)
}
static int
-fec_enet_open(struct net_device *dev)
+fec_enet_open(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
- ret = fec_enet_alloc_buffers(dev);
+ ret = fec_enet_alloc_buffers(ndev);
if (ret)
return ret;
 /* Probe and connect to the PHY when opening the interface */
- ret = fec_enet_mii_probe(dev);
+ ret = fec_enet_mii_probe(ndev);
if (ret) {
- fec_enet_free_buffers(dev);
+ fec_enet_free_buffers(ndev);
return ret;
}
phy_start(fep->phy_dev);
- netif_start_queue(dev);
+ netif_start_queue(ndev);
fep->opened = 1;
return 0;
}
static int
-fec_enet_close(struct net_device *dev)
+fec_enet_close(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
/* Don't know what to do yet. */
fep->opened = 0;
- netif_stop_queue(dev);
- fec_stop(dev);
+ netif_stop_queue(ndev);
+ fec_stop(ndev);
- if (fep->phy_dev)
+ if (fep->phy_dev) {
+ phy_stop(fep->phy_dev);
phy_disconnect(fep->phy_dev);
+ }
- fec_enet_free_buffers(dev);
+ fec_enet_free_buffers(ndev);
return 0;
}
@@ -1051,14 +1191,14 @@ fec_enet_close(struct net_device *dev)
#define HASH_BITS 6 /* #bits in hash */
#define CRC32_POLY 0xEDB88320
-static void set_multicast_list(struct net_device *dev)
+static void set_multicast_list(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
- if (dev->flags & IFF_PROMISC) {
+ if (ndev->flags & IFF_PROMISC) {
tmp = readl(fep->hwp + FEC_R_CNTRL);
tmp |= 0x8;
writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -1069,7 +1209,7 @@ static void set_multicast_list(struct net_device *dev)
tmp &= ~0x8;
writel(tmp, fep->hwp + FEC_R_CNTRL);
- if (dev->flags & IFF_ALLMULTI) {
+ if (ndev->flags & IFF_ALLMULTI) {
/* Catch all multicast addresses, so set the
* filter to all 1's
*/
@@ -1084,7 +1224,7 @@ static void set_multicast_list(struct net_device *dev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- netdev_for_each_mc_addr(ha, dev) {
+ netdev_for_each_mc_addr(ha, ndev) {
/* Only support group multicast for now */
if (!(ha->addr[0] & 1))
continue;
@@ -1092,7 +1232,7 @@ static void set_multicast_list(struct net_device *dev)
/* calculate crc32 value of mac address */
crc = 0xffffffff;
- for (i = 0; i < dev->addr_len; i++) {
+ for (i = 0; i < ndev->addr_len; i++) {
data = ha->addr[i];
for (bit = 0; bit < 8; bit++, data >>= 1) {
crc = (crc >> 1) ^
@@ -1119,20 +1259,20 @@ static void set_multicast_list(struct net_device *dev)
/* Set a MAC change in hardware. */
static int
-fec_set_mac_address(struct net_device *dev, void *p)
+fec_set_mac_address(struct net_device *ndev, void *p)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
- writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
- (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
+ writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+ (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
fep->hwp + FEC_ADDR_LOW);
- writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
+ writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
fep->hwp + FEC_ADDR_HIGH);
return 0;
}
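
The multicast filter in set_multicast_list above computes a bit-reflected CRC-32 (polynomial 0xEDB88320) over the address; the hunk is cut off before the hash extraction, but given HASH_BITS of 6 the usual scheme takes the top six bits of the CRC to select one of 64 hash-table positions. A standalone sketch under that assumption:

    #include <stdint.h>

    /* Bit-serial CRC-32 over a MAC address, as in the loop above. */
    static uint8_t fec_mc_hash(const uint8_t *addr, int len)
    {
    	uint32_t crc = 0xffffffff;
    	int i, bit;

    	for (i = 0; i < len; i++) {
    		uint32_t data = addr[i];

    		for (bit = 0; bit < 8; bit++, data >>= 1)
    			crc = (crc >> 1) ^
    			      (((crc ^ data) & 1) ? 0xEDB88320 : 0);
    	}
    	/* Assumed: the top HASH_BITS (6) bits pick the hash-table bit. */
    	return (crc >> (32 - 6)) & 0x3f;
    }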
@@ -1146,16 +1286,16 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
- .ndo_do_ioctl = fec_enet_ioctl,
+ .ndo_do_ioctl = fec_enet_ioctl,
};
/*
* XXX: We need to clean up on failure exits here.
*
*/
-static int fec_enet_init(struct net_device *dev)
+static int fec_enet_init(struct net_device *ndev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc *cbd_base;
struct bufdesc *bdp;
int i;
@@ -1170,20 +1310,19 @@ static int fec_enet_init(struct net_device *dev)
spin_lock_init(&fep->hw_lock);
- fep->hwp = (void __iomem *)dev->base_addr;
- fep->netdev = dev;
+ fep->netdev = ndev;
/* Get the Ethernet address */
- fec_get_mac(dev);
+ fec_get_mac(ndev);
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
fep->tx_bd_base = cbd_base + RX_RING_SIZE;
/* The FEC Ethernet specific entries in the device structure */
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->netdev_ops = &fec_netdev_ops;
- dev->ethtool_ops = &fec_enet_ethtool_ops;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->netdev_ops = &fec_netdev_ops;
+ ndev->ethtool_ops = &fec_enet_ethtool_ops;
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
@@ -1212,152 +1351,11 @@ static int fec_enet_init(struct net_device *dev)
bdp--;
bdp->cbd_sc |= BD_SC_WRAP;
- fec_restart(dev, 0);
+ fec_restart(ndev, 0);
return 0;
}
-/* This function is called to start or restart the FEC during a link
- * change. This only happens when switching between half and full
- * duplex.
- */
-static void
-fec_restart(struct net_device *dev, int duplex)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- const struct platform_device_id *id_entry =
- platform_get_device_id(fep->pdev);
- int i;
- u32 val, temp_mac[2];
-
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
-
- /*
- * enet-mac reset will reset mac address registers too,
- * so need to reconfigure it.
- */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
- memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
- writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
- writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
- }
-
- /* Clear any outstanding interrupt. */
- writel(0xffc00000, fep->hwp + FEC_IEVENT);
-
- /* Reset all multicast. */
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-#ifndef CONFIG_M5272
- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
- /* Set maximum receive buffer size. */
- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
-
- /* Set receive and transmit descriptor base. */
- writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
- fep->hwp + FEC_X_DES_START);
-
- fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
- fep->cur_rx = fep->rx_bd_base;
-
- /* Reset SKB transmit buffers. */
- fep->skb_cur = fep->skb_dirty = 0;
- for (i = 0; i <= TX_RING_MOD_MASK; i++) {
- if (fep->tx_skbuff[i]) {
- dev_kfree_skb_any(fep->tx_skbuff[i]);
- fep->tx_skbuff[i] = NULL;
- }
- }
-
- /* Enable MII mode */
- if (duplex) {
- /* MII enable / FD enable */
- writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
- writel(0x04, fep->hwp + FEC_X_CNTRL);
- } else {
- /* MII enable / No Rcv on Xmit */
- writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
- writel(0x0, fep->hwp + FEC_X_CNTRL);
- }
- fep->full_duplex = duplex;
-
- /* Set MII speed */
- writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-
- /*
- * The phy interface and speed need to get configured
- * differently on enet-mac.
- */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
- val = readl(fep->hwp + FEC_R_CNTRL);
-
- /* MII or RMII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
- val |= (1 << 8);
- else
- val &= ~(1 << 8);
-
- /* 10M or 100M */
- if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
- val &= ~(1 << 9);
- else
- val |= (1 << 9);
-
- writel(val, fep->hwp + FEC_R_CNTRL);
- } else {
-#ifdef FEC_MIIGSK_ENR
- if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
- /* disable the gasket and wait */
- writel(0, fep->hwp + FEC_MIIGSK_ENR);
- while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
- udelay(1);
-
- /*
- * configure the gasket:
- * RMII, 50 MHz, no loopback, no echo
- */
- writel(1, fep->hwp + FEC_MIIGSK_CFGR);
-
- /* re-enable the gasket */
- writel(2, fep->hwp + FEC_MIIGSK_ENR);
- }
-#endif
- }
-
- /* And last, enable the transmit and receive processing */
- writel(2, fep->hwp + FEC_ECNTRL);
- writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-
- /* Enable interrupts we wish to service */
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
-static void
-fec_stop(struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
-
- /* We cannot expect a graceful transmit stop without link !!! */
- if (fep->link) {
- writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
- udelay(10);
- if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
- printk("fec_stop : Graceful transmit stop did not complete !\n");
- }
-
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
- writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
static int __devinit
fec_probe(struct platform_device *pdev)
{
@@ -1377,19 +1375,20 @@ fec_probe(struct platform_device *pdev)
/* Init network device */
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
- if (!ndev)
- return -ENOMEM;
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto failed_alloc_etherdev;
+ }
SET_NETDEV_DEV(ndev, &pdev->dev);
/* setup board info structure */
fep = netdev_priv(ndev);
- memset(fep, 0, sizeof(*fep));
- ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+ fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
- if (!ndev->base_addr) {
+ if (!fep->hwp) {
ret = -ENOMEM;
goto failed_ioremap;
}
@@ -1407,10 +1406,9 @@ fec_probe(struct platform_device *pdev)
break;
ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
if (ret) {
- while (i >= 0) {
+ while (--i >= 0) {
irq = platform_get_irq(pdev, i);
free_irq(irq, ndev);
- i--;
}
goto failed_irq;
}
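
The `while (--i >= 0)` fix above is the standard partial-unwind idiom: when request_irq() fails for index i, IRQ i was never taken, so only indices 0..i-1 may be freed (the old `while (i >= 0)` would also free the unclaimed one). In general form (claim/release are placeholders):

    for (i = 0; i < n; i++) {
    	ret = claim(i);
    	if (ret)
    		goto unwind;
    }
    return 0;

    unwind:
    while (--i >= 0)	/* release only what was successfully claimed */
    	release(i);
    return ret;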
@@ -1453,9 +1451,11 @@ failed_clk:
free_irq(irq, ndev);
}
failed_irq:
- iounmap((void __iomem *)ndev->base_addr);
+ iounmap(fep->hwp);
failed_ioremap:
free_netdev(ndev);
+failed_alloc_etherdev:
+ release_mem_region(r->start, resource_size(r));
return ret;
}
@@ -1465,16 +1465,22 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
-
- platform_set_drvdata(pdev, NULL);
+ struct resource *r;
fec_stop(ndev);
fec_enet_mii_remove(fep);
clk_disable(fep->clk);
clk_put(fep->clk);
- iounmap((void __iomem *)ndev->base_addr);
+ iounmap(fep->hwp);
unregister_netdev(ndev);
free_netdev(ndev);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ BUG_ON(!r);
+ release_mem_region(r->start, resource_size(r));
+
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
@@ -1483,16 +1489,14 @@ static int
fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep;
+ struct fec_enet_private *fep = netdev_priv(ndev);
- if (ndev) {
- fep = netdev_priv(ndev);
- if (netif_running(ndev)) {
- fec_stop(ndev);
- netif_device_detach(ndev);
- }
- clk_disable(fep->clk);
+ if (netif_running(ndev)) {
+ fec_stop(ndev);
+ netif_device_detach(ndev);
}
+ clk_disable(fep->clk);
+
return 0;
}
@@ -1500,16 +1504,14 @@ static int
fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
- struct fec_enet_private *fep;
+ struct fec_enet_private *fep = netdev_priv(ndev);
- if (ndev) {
- fep = netdev_priv(ndev);
- clk_enable(fep->clk);
- if (netif_running(ndev)) {
- fec_restart(ndev, fep->full_duplex);
- netif_device_attach(ndev);
- }
+ clk_enable(fep->clk);
+ if (netif_running(ndev)) {
+ fec_restart(ndev, fep->full_duplex);
+ netif_device_attach(ndev);
}
+
return 0;
}
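
With the NULL checks gone, suspend/resume rely on drvdata being valid, which holds because the probe path sets it before registering and the remove path only clears it at the very end. These callbacks are wired up through a dev_pm_ops table; a sketch of the expected hookup (the table layout is an assumption, not quoted from this patch):

    static const struct dev_pm_ops fec_pm_ops = {
    	.suspend = fec_suspend,
    	.resume  = fec_resume,
    };

    /* and, in the platform driver definition: .driver.pm = &fec_pm_ops */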
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ac1d323..8931168 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct list_head *p;
+ struct bpqdev *bpqdev = v;
++*pos;
if (v == SEQ_START_TOKEN)
- p = rcu_dereference(bpq_devices.next);
+ p = rcu_dereference(list_next_rcu(&bpq_devices));
else
- p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+ p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
return (p == &bpq_devices) ? NULL
: list_entry(p, struct bpqdev, bpq_list);
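
list_next_rcu() (from <linux/rculist.h>) hands ->next to rcu_dereference() with the __rcu annotation it expects, so the sparse checker no longer complains about the open-coded dereference. When a whole traversal is needed rather than a single step, the equivalent high-level form is the usual iterator, sketched here:

    struct bpqdev *bpq;

    rcu_read_lock();
    list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
    	/* inspect bpq; entries stay valid inside the read section */
    }
    rcu_read_unlock();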
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 0a2368f..65c1833 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -129,6 +129,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
case E1000_DEV_ID_82580_COPPER:
case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
case E1000_DEV_ID_82580_SERDES:
case E1000_DEV_ID_82580_SGMII:
case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -237,9 +238,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
size = 14;
nvm->word_size = 1 << size;
- /* if 82576 then initialize mailbox parameters */
- if (mac->type == e1000_82576)
+ /* if part supports SR-IOV then initialize mailbox parameters */
+ switch (mac->type) {
+ case e1000_82576:
+ case e1000_i350:
igb_init_mbx_params_pf(hw);
+ break;
+ default:
+ break;
+ }
/* setup PHY parameters */
if (phy->media_type != e1000_media_type_copper) {
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index e2638af..281324e 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,7 @@ struct e1000_hw;
#define E1000_DEV_ID_82580_SERDES 0x1510
#define E1000_DEV_ID_82580_SGMII 0x1511
#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527
#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index c474cdb..78d48c7 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
{
struct e1000_mbx_info *mbx = &hw->mbx;
- if (hw->mac.type == e1000_82576) {
- mbx->timeout = 0;
- mbx->usec_delay = 0;
-
- mbx->size = E1000_VFMAILBOX_SIZE;
-
- mbx->ops.read = igb_read_mbx_pf;
- mbx->ops.write = igb_write_mbx_pf;
- mbx->ops.read_posted = igb_read_posted_mbx;
- mbx->ops.write_posted = igb_write_posted_mbx;
- mbx->ops.check_for_msg = igb_check_for_msg_pf;
- mbx->ops.check_for_ack = igb_check_for_ack_pf;
- mbx->ops.check_for_rst = igb_check_for_rst_pf;
-
- mbx->stats.msgs_tx = 0;
- mbx->stats.msgs_rx = 0;
- mbx->stats.reqs = 0;
- mbx->stats.acks = 0;
- mbx->stats.rsts = 0;
- }
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = E1000_VFMAILBOX_SIZE;
+
+ mbx->ops.read = igb_read_mbx_pf;
+ mbx->ops.write = igb_write_mbx_pf;
+ mbx->ops.read_posted = igb_read_posted_mbx;
+ mbx->ops.write_posted = igb_write_posted_mbx;
+ mbx->ops.check_for_msg = igb_check_for_msg_pf;
+ mbx->ops.check_for_ack = igb_check_for_ack_pf;
+ mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
return 0;
}
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 58c665b..cb6bf7b 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -68,6 +68,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
@@ -2286,9 +2287,14 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
- if (hw->mac.type == e1000_82576)
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
-
+ break;
+ default:
+ break;
+ }
#endif /* CONFIG_PCI_IOV */
adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3b8c924..12769b5 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -334,6 +334,10 @@ struct ixgbe_adapter {
u16 bd_number;
struct work_struct reset_task;
struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+
+ /* DCB parameters */
+ struct ieee_pfc *ixgbe_ieee_pfc;
+ struct ieee_ets *ixgbe_ieee_ets;
struct ixgbe_dcb_config dcb_cfg;
struct ixgbe_dcb_config temp_dcb_cfg;
u8 dcb_set_bitmap;
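
The two new pointers cache the IEEE 802.1Qaz state passed in through the dcbnl ops. For orientation, the relevant dcbnl structures look roughly like this (abridged from <linux/dcbnl.h>; treat the field list as a sketch, not the authoritative definition):

    struct ieee_ets {
    	__u8 willing;
    	__u8 ets_cap;
    	__u8 cbs;
    	__u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];	/* Tx bandwidth % per TC */
    	__u8 tc_rx_bw[IEEE_8021QAZ_MAX_TCS];
    	__u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];	/* selection algorithm */
    	__u8 prio_tc[IEEE_8021QAZ_MAX_TCS];	/* priority -> TC mapping */
    };

    struct ieee_pfc {
    	__u8 pfc_cap;
    	__u8 pfc_en;	/* bitmask of PFC-enabled traffic classes */
    	__u8 mbc;
    	__u16 delay;
    };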
@@ -521,7 +525,6 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
-extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 66ed045..90cceb4 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -29,6 +29,7 @@
#define _IXGBE_COMMON_H_
#include "ixgbe_type.h"
+#include "ixgbe.h"
u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -110,9 +111,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
-extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
- netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+ netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
#define e_dev_info(format, arg...) \
dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index d16c260..13c962e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -34,6 +34,42 @@
#include "ixgbe_dcb_82599.h"
/**
+ * ixgbe_ieee_credits - calculate IEEE traffic class credits
+ * @bw: bandwidth percentage indexed by traffic class
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @max_frame: maximum frame size in bytes
+ *
+ * Calculates the IEEE traffic class credits from the configured
+ * bandwidth percentages. Credits are the smallest unit
+ * programmable into the underlying hardware. The IEEE 802.1Qaz
+ * specification does not use bandwidth groups, so this is much
+ * simpler than the CEE case.
+ */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
+{
+ int min_percent = 100;
+ int min_credit, multiplier;
+ int i;
+
+ min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+ DCB_CREDIT_QUANTUM;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if (bw[i] < min_percent && bw[i])
+ min_percent = bw[i];
+ }
+
+ multiplier = (min_credit / min_percent) + 1;
+
+ /* Find out the hw credits for each TC */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+
+ if (val < min_credit)
+ val = min_credit;
+ refill[i] = val;
+
+ max[i] = (bw[i] * MAX_CREDIT)/100;
+ }
+ return 0;
+}
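
A worked pass through the arithmetic above, taking DCB_CREDIT_QUANTUM as 64 and MAX_CREDIT_REFILL/MAX_CREDIT as 511/4095 (the ixgbe_dcb.h values, treated as assumptions here): with max_frame = 1518 and bw = {25, 75, 0, ...}, min_credit = (759 + 63) / 64 = 12, min_percent = 25 (zero entries are skipped), and multiplier = (12 / 25) + 1 = 1. That yields refill = {25, 75, 12, 12, ...} (zero-bandwidth classes are clamped up to min_credit) and max = {25 * 4095 / 100, 75 * 4095 / 100, 0, ...} = {1023, 3071, 0, ...}.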
+
+/**
* ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
* @ixgbe_dcb_config: Struct containing DCB settings.
* @direction: Configuring either Tx or Rx.
@@ -141,6 +177,59 @@ out:
return ret_val;
}
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
+{
+ int i;
+
+ *pfc_en = 0;
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
+}
+
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
+ u16 *refill)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ refill[i] = p->data_credits_refill;
+ }
+}
+
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ max[i] = cfg->tc_config[i].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *bwgid)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ bwgid[i] = p->bwg_id;
+ }
+}
+
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
+ u8 *ptype)
+{
+ struct tc_bw_alloc *p;
+ int i;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &cfg->tc_config[i].path[direction];
+ ptype[i] = p->prio_type;
+ }
+}
+
/**
* ixgbe_dcb_hw_config - Config and enable DCB
* @hw: pointer to hardware structure
@@ -152,13 +241,30 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
+ u8 pfc_en;
+ u8 ptype[MAX_TRAFFIC_CLASS];
+ u8 bwgid[MAX_TRAFFIC_CLASS];
+ u16 refill[MAX_TRAFFIC_CLASS];
+ u16 max[MAX_TRAFFIC_CLASS];
+
+ /* Unpack CEE standard containers */
+ ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
+ ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max(dcb_config, max);
+ ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
+ ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
+
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
+ ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
+ pfc_en, refill, max, bwgid,
+ ptype);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+ ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
+ pfc_en, refill, max, bwgid,
+ ptype);
break;
default:
break;
@@ -166,3 +272,70 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
return ret;
}
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
+{
+ int ret = -EINVAL;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa)
+{
+ int i;
+ u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+
+ /* Map TSA onto CEE prio type */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ switch (tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ prio_type[i] = 2;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ prio_type[i] = 0;
+ break;
+ default:
+ /* The hardware only supports the strict-priority and
+ * ETS transmission selection algorithms; if we receive
+ * any other value from dcbnl, throw an error.
+ */
+ return -EINVAL;
+ }
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
+ prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
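
Together with ixgbe_ieee_credits() above, these helpers let the 802.1Qaz dcbnl path program ETS without building a full struct ixgbe_dcb_config. A hedged sketch of the intended call sequence (the surrounding handler and variable setup are assumptions, not code from this patch):

    u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
    u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = { 0 };	/* 802.1Qaz: no BW groups */
    int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
    int err;

    err = ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
    if (err)
    	return err;
    return ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
    			       bwg_id, ets->tc_tsa);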
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 1cfe38e..e593511 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -139,7 +139,6 @@ struct ixgbe_dcb_config {
struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
bool pfc_mode_enable;
- bool round_robin_enable;
enum dcb_rx_pba_cfg rx_pba_cfg;
@@ -148,12 +147,21 @@ struct ixgbe_dcb_config {
};
/* DCB driver APIs */
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
/* DCB credits calculation */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+ u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type);
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
/* DCB definitions for credit calculation */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 9a5e89c..2965edc 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -38,15 +38,14 @@
*
* Configure packet buffers for DCB mode.
*/
-static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
{
s32 ret_val = 0;
u32 value = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
/* Setup Rx packet buffer sizes */
- switch (dcb_config->rx_pba_cfg) {
+ switch (rx_pba) {
case pba_80_48:
/* Setup the first four at 80KB */
value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
*
* Configure Rx Data Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
@@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
- credit_refill = p->data_credits_refill;
- credit_max = p->data_credits_max;
+ credit_refill = refill[i];
+ credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RT2CR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg, max_credits;
u8 i;
@@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
- if (!(dcb_config->round_robin_enable)) {
- /* Enable DFP and Recycle mode */
- reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
- }
+ /* Enable DFP and Recycle mode */
+ reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
reg |= IXGBE_DPMCS_TSOEF;
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- max_credits = dcb_config->tc_config[i].desc_credits_max;
+ max_credits = max[i];
reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
- reg |= p->data_credits_refill;
- reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_TDTQ2TCCR_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_TDTQ2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Tx Data Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg;
u8 i;
@@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- reg = p->data_credits_refill;
- reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
- reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_TDPT2TCCR_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_TDPT2TCCR_LSP;
IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,13 +228,12 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
*
* Configure Priority Flow Control for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 reg, rx_pba_size;
u8 i;
- if (!dcb_config->pfc_mode_enable)
+ if (!pfc_en)
goto out;
/* Enable Transmit Priority Flow Control */
@@ -256,19 +254,20 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
* for each traffic class.
*/
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int enabled = pfc_en & (1 << i);
rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
reg = (rx_pba_size - hw->fc.low_water) << 10;
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+ if (enabled == pfc_enabled_tx ||
+ enabled == pfc_enabled_full)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
reg = (rx_pba_size - hw->fc.high_water) << 10;
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+ if (enabled == pfc_enabled_tx ||
+ enabled == pfc_enabled_full)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
@@ -292,7 +291,7 @@ out:
* Configure queue statistics registers, all queues belonging to same traffic
* class uses a single set of queue statistics counters.
*/
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
@@ -325,13 +324,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
- ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
- ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
- ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
- ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
- ixgbe_dcb_config_pfc_82598(hw, dcb_config);
+ ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
+ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_pfc_82598(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82598(hw);
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index abc03cc..0d2a758 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -71,9 +71,28 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
/* DCB hw initialization */
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 374e1f7..b0d97a9 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -33,19 +33,18 @@
/**
* ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method used to distribute the packet buffers
*
* Configure packet buffers for DCB mode.
*/
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
{
s32 ret_val = 0;
u32 value = IXGBE_RXPBSIZE_64KB;
u8 i = 0;
/* Setup Rx packet buffer sizes */
- switch (dcb_config->rx_pba_cfg) {
+ switch (rx_pba) {
case pba_80_48:
/* Setup the first four at 80KB */
value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,19 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Rx Packet Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg = 0;
u32 credit_refill = 0;
u32 credit_max = 0;
@@ -103,15 +107,13 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-
- credit_refill = p->data_credits_refill;
- credit_max = p->data_credits_max;
+ credit_refill = refill[i];
+ credit_max = max[i];
reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
- reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RTRPT4C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +132,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Descriptor Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg, max_credits;
u8 i;
@@ -149,16 +156,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- max_credits = dcb_config->tc_config[i].desc_credits_max;
+ max_credits = max[i];
reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
- reg |= p->data_credits_refill;
- reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;
+ reg |= refill[i];
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_RTTDT2C_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RTTDT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +183,19 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
*
* Configure Tx Packet Arbiter and credits for each traffic class.
*/
-static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type)
{
- struct tc_bw_alloc *p;
u32 reg;
u8 i;
@@ -205,15 +216,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
/* Configure traffic class credits and priority */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
- reg = p->data_credits_refill;
- reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
- reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
+ reg = refill[i];
+ reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+ reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
- if (p->prio_type == prio_group)
+ if (prio_type[i] == prio_group)
reg |= IXGBE_RTTPT2C_GSP;
- if (p->prio_type == prio_link)
+ if (prio_type[i] == prio_link)
reg |= IXGBE_RTTPT2C_LSP;
IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,17 +243,16 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
/**
* ixgbe_dcb_config_pfc_82599 - Configure priority flow control
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
*
* Configure Priority Flow Control (PFC) for each traffic class.
*/
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
{
u32 i, reg, rx_pba_size;
/* If PFC is disabled globally then fall back to LFC. */
- if (!dcb_config->pfc_mode_enable) {
+ if (!pfc_en) {
for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
hw->mac.ops.fc_enable(hw, i);
goto out;
@@ -251,19 +260,18 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ int enabled = pfc_en & (1 << i);
rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
reg = (rx_pba_size - hw->fc.low_water) << 10;
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+ if (enabled)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
reg = (rx_pba_size - hw->fc.high_water) << 10;
- if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
- dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+ if (enabled)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
}
@@ -349,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_config_82599 - Configure general DCB parameters
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
*
* Configure general DCB parameters.
*/
@@ -406,19 +413,27 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method used to distribute the packet buffers
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @pfc_en: enabled pfc bitmask
*
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type)
{
- ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
+ ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
ixgbe_dcb_config_82599(hw);
- ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
- ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
- ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
- ixgbe_dcb_config_pfc_82599(hw, dcb_config);
+ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type);
+ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+ bwg_id, prio_type);
+ ixgbe_dcb_config_pfc_82599(hw, pfc_en);
ixgbe_dcb_config_tc_stats_82599(hw);
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 3841649..5b0ca85 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -102,11 +102,29 @@
/* DCB hardware-specific driver APIs */
/* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config);
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
/* DCB hw initialization */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+ u16 *refill,
+ u16 *max,
+ u8 *bwg_id,
+ u8 *prio_type);
+
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *config);
+ u8 rx_pba, u8 pfc_en, u16 *refill,
+ u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82599_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bf566e8..a977df3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -37,7 +37,6 @@
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
-#define BIT_RESETLINK 0x40
#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
@@ -225,10 +224,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
+ adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_TX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -239,10 +236,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
+ adapter->dcb_cfg.bw_percentage[0][bwg_id])
adapter->dcb_set_bitmap |= BIT_PG_TX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -269,10 +264,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
(adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
(adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
- adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
+ adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
adapter->dcb_set_bitmap |= BIT_PG_RX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -283,10 +276,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
- adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
+ adapter->dcb_cfg.bw_percentage[1][bwg_id])
adapter->dcb_set_bitmap |= BIT_PG_RX;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -365,21 +356,17 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
return DCB_NO_HW_CHG;
/*
- * Only take down the adapter if the configuration change
- * requires a reset.
+	 * Only take down the adapter if an app change occurred. FCoE
+	 * may shuffle tx rings in this case and this cannot be done
+	 * without a reset currently.
*/
- if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
msleep(1);
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
- } else {
- if (netif_running(netdev))
- ixgbe_down(adapter);
- }
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+ ixgbe_clear_interrupt_scheme(adapter);
}
if (adapter->dcb_cfg.pfc_mode_enable) {
@@ -408,29 +395,51 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
- if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
- } else {
- if (netif_running(netdev))
- ixgbe_up(adapter);
- }
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+ ixgbe_init_interrupt_scheme(adapter);
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_open(netdev);
ret = DCB_HW_CHG_RST;
- } else if (adapter->dcb_set_bitmap & BIT_PFC) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- ixgbe_dcb_config_pfc_82598(&adapter->hw,
- &adapter->dcb_cfg);
- else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- ixgbe_dcb_config_pfc_82599(&adapter->hw,
- &adapter->dcb_cfg);
+ }
+
+ if (adapter->dcb_set_bitmap & BIT_PFC) {
+ u8 pfc_en;
+ ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
+ ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
ret = DCB_HW_CHG;
}
+
+ if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
+ u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
+ u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
+ int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+#ifdef IXGBE_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+ ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+ max_frame, DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+ max_frame, DCB_RX_CONFIG);
+
+ ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, refill);
+ ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
+ ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, bwg_id);
+ ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
+ DCB_TX_CONFIG, prio_type);
+
+ ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+ bwg_id, prio_type);
+ }
+
if (adapter->dcb_cfg.pfc_mode_enable)
adapter->hw.fc.current_mode = ixgbe_fc_pfc;
- if (adapter->dcb_set_bitmap & BIT_RESETLINK)
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
clear_bit(__IXGBE_RESETTING, &adapter->state);
adapter->dcb_set_bitmap = 0x00;
return ret;
@@ -568,18 +577,29 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
case DCB_APP_IDTYPE_ETHTYPE:
#ifdef IXGBE_FCOE
if (id == ETH_P_FCOE) {
- u8 tc;
- struct ixgbe_adapter *adapter;
+ u8 old_tc;
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
- adapter = netdev_priv(netdev);
- tc = adapter->fcoe.tc;
+		/* Get the currently programmed tc */
+ old_tc = adapter->fcoe.tc;
rval = ixgbe_fcoe_setapp(adapter, up);
- if ((!rval) && (tc != adapter->fcoe.tc) &&
- (adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+
+ if (rval ||
+ !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
+ !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ break;
+
+			/* The FCoE application priority may be changed multiple
+			 * times in quick succession with switches that build up
+			 * TLVs. To avoid creating unneeded device resets this
+			 * checks the actual HW configuration and clears
+			 * BIT_APP_UPCHG if a HW configuration change is not
+			 * needed.
+			 */
+ if (old_tc == adapter->fcoe.tc)
+ adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
+ else
adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
- adapter->dcb_set_bitmap |= BIT_RESETLINK;
- }
}
#endif
break;
@@ -591,7 +611,98 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
return rval;
}
+static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
+
+	/* No IEEE ETS settings available */
+ if (!my_ets)
+ return -EINVAL;
+
+ ets->ets_cap = MAX_TRAFFIC_CLASS;
+ ets->cbs = my_ets->cbs;
+ memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int err;
+ /* naively give each TC a bwg to map onto CEE hardware */
+ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ if (!adapter->ixgbe_ieee_ets) {
+ adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
+ GFP_KERNEL);
+ if (!adapter->ixgbe_ieee_ets)
+ return -ENOMEM;
+ }
+
+
+ memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
+ ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+ err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+ bwg_id, ets->tc_tsa);
+ return err;
+}
+
+static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
+ int i;
+
+ /* No IEEE PFC settings available */
+ if (!my_pfc)
+ return -EINVAL;
+
+ pfc->pfc_cap = MAX_TRAFFIC_CLASS;
+ pfc->pfc_en = my_pfc->pfc_en;
+ pfc->mbc = my_pfc->mbc;
+ pfc->delay = my_pfc->delay;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		pfc->requests[i] = adapter->stats.pxofftxc[i];
+		pfc->indications[i] = adapter->stats.pxoffrxc[i];
+ }
+
+ return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (!adapter->ixgbe_ieee_pfc) {
+ adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
+ GFP_KERNEL);
+ if (!adapter->ixgbe_ieee_pfc)
+ return -ENOMEM;
+ }
+
+ memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
+ err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
+ return err;
+}
+
const struct dcbnl_rtnl_ops dcbnl_ops = {
+ .ieee_getets = ixgbe_dcbnl_ieee_getets,
+ .ieee_setets = ixgbe_dcbnl_ieee_setets,
+ .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
+ .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
.getstate = ixgbe_dcbnl_get_state,
.setstate = ixgbe_dcbnl_set_state,
.getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
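Both IEEE set hooks added above follow the same allocate-on-first-set pattern: the last configuration accepted from userspace is cached in a lazily allocated structure (so the matching get hook can report it back), and only then is the hardware programmed. A minimal sketch of the idiom, with illustrative names (struct adapter and hw_apply_ets are not ixgbe's):

static int cache_and_apply_ets(struct adapter *ap, const struct ieee_ets *ets)
{
	/* allocate the cache slot the first time userspace sets a config */
	if (!ap->ieee_ets_cache) {
		ap->ieee_ets_cache = kmalloc(sizeof(*ap->ieee_ets_cache),
					     GFP_KERNEL);
		if (!ap->ieee_ets_cache)
			return -ENOMEM;
	}

	/* remember the request, then push it to hardware */
	memcpy(ap->ieee_ets_cache, ets, sizeof(*ap->ieee_ets_cache));
	return hw_apply_ets(ap, ets);
}

The get side returns -EINVAL while the cache pointer is still NULL, which is why ieee_getets/ieee_getpfc fail until a set has been made.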
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2002ea8..309272f8 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -152,7 +152,17 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->supported |= (SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+ break;
+ default:
+ break;
+ }
+
ecmd->advertising = ADVERTISED_Autoneg;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
@@ -167,6 +177,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising |= (ADVERTISED_10000baseT_Full |
ADVERTISED_1000baseT_Full);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ if (!(ecmd->advertising & ADVERTISED_100baseT_Full))
+ ecmd->advertising |= (ADVERTISED_100baseT_Full);
+ break;
+ default:
+ break;
+ }
+
if (hw->phy.media_type == ixgbe_media_type_copper) {
ecmd->supported |= SUPPORTED_TP;
ecmd->advertising |= ADVERTISED_TP;
@@ -271,8 +290,19 @@ static int ixgbe_get_settings(struct net_device *netdev,
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
- ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- SPEED_10000 : SPEED_1000;
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ ecmd->speed = SPEED_10000;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ecmd->speed = SPEED_1000;
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ ecmd->speed = SPEED_100;
+ break;
+ default:
+ break;
+ }
ecmd->duplex = DUPLEX_FULL;
} else {
ecmd->speed = -1;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 30f9ccf..eca762d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -648,7 +648,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
*
* Returns : a tc index for use in range 0-7, or 0-3
*/
-u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
+static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
{
int tc = -1;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@@ -5174,7 +5174,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
adapter->dcb_cfg.rx_pba_cfg = pba_equal;
adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_cfg.round_robin_enable = false;
adapter->dcb_set_bitmap = 0x00;
ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
adapter->ring_feature[RING_F_DCB].indices);
@@ -5611,6 +5610,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
}
ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_DCB
+ kfree(adapter->ixgbe_ieee_pfc);
+ kfree(adapter->ixgbe_ieee_ets);
+#endif
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -6101,7 +6104,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
"10 Gbps" :
(link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
- "1 Gbps" : "unknown speed")),
+ "1 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_100_FULL ?
+ "100 Mbps" :
+ "unknown speed"))),
((flow_rx && flow_tx) ? "RX/TX" :
(flow_rx ? "RX" :
(flow_tx ? "TX" : "None"))));
@@ -7706,16 +7712,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
#endif /* CONFIG_IXGBE_DCA */
-/**
- * ixgbe_get_hw_dev return device
- * used by hardware layer to print debugging information
- **/
-struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
-{
- struct ixgbe_adapter *adapter = hw->back;
- return adapter->netdev;
-}
-
module_exit(ixgbe_exit_module);
/* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index ea82c5a..f215c4c 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -437,6 +437,7 @@ out_no_read:
return ret_val;
}
+#ifdef CONFIG_PCI_IOV
/**
* ixgbe_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
@@ -465,6 +466,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
break;
}
}
+#endif /* CONFIG_PCI_IOV */
struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 3df9b15..ada0ce3 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -86,7 +86,9 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+#ifdef CONFIG_PCI_IOV
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+#endif /* CONFIG_PCI_IOV */
extern struct ixgbe_mbx_operations mbx_ops_generic;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index e97ebef..5b441b7 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -161,6 +161,67 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
}
static inline void
+jme_mac_rxclk_off(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
+ jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_rxclk_on(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
+ jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_txclk_off(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_mac_txclk_on(struct jme_adapter *jme)
+{
+ u32 speed = jme->reg_ghc & GHC_SPEED;
+ if (speed == GHC_SPEED_1000M)
+ jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+ else
+ jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_ghc_speed(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_250A2_workaround(struct jme_adapter *jme)
+{
+ jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+ GPREG1_RSSPATCH);
+ jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_assert_ghc_reset(struct jme_adapter *jme)
+{
+ jme->reg_ghc |= GHC_SWRST;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_clear_ghc_reset(struct jme_adapter *jme)
+{
+ jme->reg_ghc &= ~GHC_SWRST;
+ jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
jme_reset_mac_processor(struct jme_adapter *jme)
{
static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
@@ -168,9 +229,24 @@ jme_reset_mac_processor(struct jme_adapter *jme)
u32 gpreg0;
int i;
- jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
- udelay(2);
- jwrite32(jme, JME_GHC, jme->reg_ghc);
+ jme_reset_ghc_speed(jme);
+ jme_reset_250A2_workaround(jme);
+
+ jme_mac_rxclk_on(jme);
+ jme_mac_txclk_on(jme);
+ udelay(1);
+ jme_assert_ghc_reset(jme);
+ udelay(1);
+ jme_mac_rxclk_off(jme);
+ jme_mac_txclk_off(jme);
+ udelay(1);
+ jme_clear_ghc_reset(jme);
+ udelay(1);
+ jme_mac_rxclk_on(jme);
+ jme_mac_txclk_on(jme);
+ udelay(1);
+ jme_mac_rxclk_off(jme);
+ jme_mac_txclk_off(jme);
jwrite32(jme, JME_RXDBA_LO, 0x00000000);
jwrite32(jme, JME_RXDBA_HI, 0x00000000);
@@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme)
else
gpreg0 = GPREG0_DEFAULT;
jwrite32(jme, JME_GPREG0, gpreg0);
- jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
-}
-
-static inline void
-jme_reset_ghc_speed(struct jme_adapter *jme)
-{
- jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
- jwrite32(jme, JME_GHC, jme->reg_ghc);
}
static inline void
@@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme)
}
static inline void
-jme_set_phyfifoa(struct jme_adapter *jme)
+jme_set_phyfifo_5level(struct jme_adapter *jme)
{
jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
}
static inline void
-jme_set_phyfifob(struct jme_adapter *jme)
+jme_set_phyfifo_8level(struct jme_adapter *jme)
{
jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
}
@@ -351,7 +419,7 @@ static int
jme_check_link(struct net_device *netdev, int testonly)
{
struct jme_adapter *jme = netdev_priv(netdev);
- u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1;
+ u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
char linkmsg[64];
int rc = 0;
@@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly)
jme->phylink = phylink;
- ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
- GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
- GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
+ /*
+	 * The speed/duplex setting of jme->reg_ghc was already cleared
+	 * by jme_reset_mac_processor().
+ */
switch (phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
- ghc |= GHC_SPEED_10M |
- GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jme->reg_ghc |= GHC_SPEED_10M;
strcat(linkmsg, "10 Mbps, ");
break;
case PHY_LINK_SPEED_100M:
- ghc |= GHC_SPEED_100M |
- GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+ jme->reg_ghc |= GHC_SPEED_100M;
strcat(linkmsg, "100 Mbps, ");
break;
case PHY_LINK_SPEED_1000M:
- ghc |= GHC_SPEED_1000M |
- GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+ jme->reg_ghc |= GHC_SPEED_1000M;
strcat(linkmsg, "1000 Mbps, ");
break;
default:
@@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly)
if (phylink & PHY_LINK_DUPLEX) {
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
- ghc |= GHC_DPX;
+ jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
+ jme->reg_ghc |= GHC_DPX;
} else {
jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
TXMCS_BACKOFF |
TXMCS_CARRIERSENSE |
TXMCS_COLLISION);
- jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
- ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
- TXTRHD_TXREN |
- ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+ jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
}
- gpreg1 = GPREG1_DEFAULT;
+ jwrite32(jme, JME_GHC, jme->reg_ghc);
+
if (is_buggy250(jme->pdev->device, jme->chiprev)) {
+ jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+ GPREG1_RSSPATCH);
if (!(phylink & PHY_LINK_DUPLEX))
- gpreg1 |= GPREG1_HALFMODEPATCH;
+ jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
switch (phylink & PHY_LINK_SPEED_MASK) {
case PHY_LINK_SPEED_10M:
- jme_set_phyfifoa(jme);
- gpreg1 |= GPREG1_RSSPATCH;
+ jme_set_phyfifo_8level(jme);
+ jme->reg_gpreg1 |= GPREG1_RSSPATCH;
break;
case PHY_LINK_SPEED_100M:
- jme_set_phyfifob(jme);
- gpreg1 |= GPREG1_RSSPATCH;
+ jme_set_phyfifo_5level(jme);
+ jme->reg_gpreg1 |= GPREG1_RSSPATCH;
break;
case PHY_LINK_SPEED_1000M:
- jme_set_phyfifoa(jme);
+ jme_set_phyfifo_8level(jme);
break;
default:
break;
}
}
-
- jwrite32(jme, JME_GPREG1, gpreg1);
- jwrite32(jme, JME_GHC, ghc);
- jme->reg_ghc = ghc;
+ jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
"Full-Duplex, " :
@@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme)
* Enable TX Engine
*/
wmb();
- jwrite32(jme, JME_TXCS, jme->reg_txcs |
+ jwrite32f(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 |
TXCS_ENABLE);
+ /*
+ * Start clock for TX MAC Processor
+ */
+ jme_mac_txclk_on(jme);
}
static inline void
@@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme)
if (!i)
pr_err("Disable TX engine timeout\n");
+
+ /*
+ * Stop clock for TX MAC Processor
+ */
+ jme_mac_txclk_off(jme);
}
static void
@@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme)
/*
* Setup Unicast Filter
*/
+ jme_set_unicastaddr(jme->dev);
jme_set_multi(jme->dev);
/*
* Enable RX Engine
*/
wmb();
- jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+ jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
RXCS_QUEUESEL_Q0 |
RXCS_ENABLE |
RXCS_QST);
+
+ /*
+ * Start clock for RX MAC Processor
+ */
+ jme_mac_rxclk_on(jme);
}
static inline void
@@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme)
if (!i)
pr_err("Disable RX engine timeout\n");
+ /*
+ * Stop clock for RX MAC Processor
+ */
+ jme_mac_rxclk_off(jme);
+}
+
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+ u16 csum = 0xFFFFu;
+
+ if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+ return csum;
+ if (skb->protocol != htons(ETH_P_IP))
+ return csum;
+ skb_set_network_header(skb, ETH_HLEN);
+ if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+ (skb->len < (ETH_HLEN +
+ (ip_hdr(skb)->ihl << 2) +
+ sizeof(struct udphdr)))) {
+ skb_reset_network_header(skb);
+ return csum;
+ }
+ skb_set_transport_header(skb,
+ ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+ csum = udp_hdr(skb)->check;
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+
+ return csum;
}
static int
-jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
{
if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
return false;
@@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
}
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
- == RXWBFLAG_UDPON)) {
+ == RXWBFLAG_UDPON) && jme_udpsum(skb)) {
if (flags & RXWBFLAG_IPV4)
netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
return false;
@@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev);
- if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
+ if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg)
tasklet_disable(&jme->rxempty_task);
if (netif_carrier_ok(netdev)) {
- jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_reset_mac_processor(jme);
@@ -1577,6 +1685,38 @@ jme_free_irq(struct jme_adapter *jme)
}
static inline void
+jme_new_phy_on(struct jme_adapter *jme)
+{
+ u32 reg;
+
+ reg = jread32(jme, JME_PHY_PWR);
+ reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+ PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
+ jwrite32(jme, JME_PHY_PWR, reg);
+
+ pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+ reg &= ~PE1_GPREG0_PBG;
+ reg |= PE1_GPREG0_ENBG;
+ pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
+jme_new_phy_off(struct jme_adapter *jme)
+{
+ u32 reg;
+
+ reg = jread32(jme, JME_PHY_PWR);
+ reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+ PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
+ jwrite32(jme, JME_PHY_PWR, reg);
+
+ pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+ reg &= ~PE1_GPREG0_PBG;
+ reg |= PE1_GPREG0_PDD3COLD;
+ pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
jme_phy_on(struct jme_adapter *jme)
{
u32 bmcr;
@@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme)
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
bmcr &= ~BMCR_PDOWN;
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+ if (new_phy_power_ctrl(jme->chip_main_rev))
+ jme_new_phy_on(jme);
+}
+
+static inline void
+jme_phy_off(struct jme_adapter *jme)
+{
+ u32 bmcr;
+
+ bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+ bmcr |= BMCR_PDOWN;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+ if (new_phy_power_ctrl(jme->chip_main_rev))
+ jme_new_phy_off(jme);
}
static int
@@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev)
jme_start_irq(jme);
- if (test_bit(JME_FLAG_SSET, &jme->flags)) {
- jme_phy_on(jme);
+ jme_phy_on(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
- } else {
+ else
jme_reset_phy_processor(jme);
- }
jme_reset_link(jme);
@@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme)
}
}
-static inline void
-jme_phy_off(struct jme_adapter *jme)
-{
- jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
-}
-
static void
jme_powersave_phy(struct jme_adapter *jme)
{
@@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev)
tasklet_disable(&jme->rxclean_task);
tasklet_disable(&jme->rxempty_task);
- jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static void
+jme_set_unicastaddr(struct net_device *netdev)
+{
+ struct jme_adapter *jme = netdev_priv(netdev);
+ u32 val;
+
+ val = (netdev->dev_addr[3] & 0xff) << 24 |
+ (netdev->dev_addr[2] & 0xff) << 16 |
+ (netdev->dev_addr[1] & 0xff) << 8 |
+ (netdev->dev_addr[0] & 0xff);
+ jwrite32(jme, JME_RXUMA_LO, val);
+ val = (netdev->dev_addr[5] & 0xff) << 8 |
+ (netdev->dev_addr[4] & 0xff);
+ jwrite32(jme, JME_RXUMA_HI, val);
+}
+
static int
jme_set_macaddr(struct net_device *netdev, void *p)
{
struct jme_adapter *jme = netdev_priv(netdev);
struct sockaddr *addr = p;
- u32 val;
if (netif_running(netdev))
return -EBUSY;
spin_lock_bh(&jme->macaddr_lock);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- val = (addr->sa_data[3] & 0xff) << 24 |
- (addr->sa_data[2] & 0xff) << 16 |
- (addr->sa_data[1] & 0xff) << 8 |
- (addr->sa_data[0] & 0xff);
- jwrite32(jme, JME_RXUMA_LO, val);
- val = (addr->sa_data[5] & 0xff) << 8 |
- (addr->sa_data[4] & 0xff);
- jwrite32(jme, JME_RXUMA_HI, val);
+ jme_set_unicastaddr(netdev);
spin_unlock_bh(&jme->macaddr_lock);
return 0;
@@ -2731,6 +2886,8 @@ jme_check_hw_ver(struct jme_adapter *jme)
jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
+ jme->chip_main_rev = jme->chiprev & 0xF;
+ jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
}
static const struct net_device_ops jme_netdev_ops = {
@@ -2880,6 +3037,7 @@ jme_init_one(struct pci_dev *pdev,
jme->reg_rxmcs = RXMCS_DEFAULT;
jme->reg_txpfc = 0;
jme->reg_pmcs = PMCS_MFEN;
+ jme->reg_gpreg1 = GPREG1_DEFAULT;
set_bit(JME_FLAG_TXCSUM, &jme->flags);
set_bit(JME_FLAG_TSO, &jme->flags);
@@ -2936,8 +3094,8 @@ jme_init_one(struct pci_dev *pdev,
jme->mii_if.mdio_write = jme_mdio_write;
jme_clear_pm(jme);
- jme_set_phyfifoa(jme);
- pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
+ jme_set_phyfifo_5level(jme);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->pcirev);
if (!jme->fpgaver)
jme_phy_init(jme);
jme_phy_off(jme);
@@ -2964,14 +3122,14 @@ jme_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
- netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+ netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
"JMC250 Gigabit Ethernet" :
(jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
"JMC260 Fast Ethernet" : "Unknown",
(jme->fpgaver != 0) ? " (FPGA)" : "",
(jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
- jme->rev, netdev->dev_addr);
+ jme->pcirev, netdev->dev_addr);
return 0;
@@ -3035,7 +3193,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
jme_polling_mode(jme);
jme_stop_pcc_timer(jme);
- jme_reset_ghc_speed(jme);
jme_disable_rx_engine(jme);
jme_disable_tx_engine(jme);
jme_reset_mac_processor(jme);
@@ -3066,12 +3223,11 @@ jme_resume(struct pci_dev *pdev)
jme_clear_pm(jme);
pci_restore_state(pdev);
- if (test_bit(JME_FLAG_SSET, &jme->flags)) {
- jme_phy_on(jme);
+ jme_phy_on(jme);
+ if (test_bit(JME_FLAG_SSET, &jme->flags))
jme_set_settings(netdev, &jme->old_ecmd);
- } else {
+ else
jme_reset_phy_processor(jme);
- }
jme_start_irq(jme);
netif_device_attach(netdev);
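The new jme_udpsum() helper exists because RFC 768 lets a UDP sender transmit a zero checksum field, meaning no checksum was computed; without the extra check, the JMC25x write-back flags would report such frames as checksum failures. A hedged restatement of the receive-path decision (the helper name is illustrative; the flag names are the driver's):

static bool jme_rx_udp_csum_bad(u16 flags, struct sk_buff *skb)
{
	/* HW saw an unfragmented UDP frame but did not validate it */
	bool hw_flagged = (flags & (RXWBFLAG_MF | RXWBFLAG_UDPON |
				    RXWBFLAG_UDPCS)) == RXWBFLAG_UDPON;

	/* jme_udpsum() returns the datagram's checksum field; zero
	 * means the sender disabled checksumming, so not an error */
	return hw_flagged && jme_udpsum(skb) != 0;
}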
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index eac0926..8bf3045 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -26,7 +26,7 @@
#define __JME_H_INCLUDED__
#define DRV_NAME "jme"
-#define DRV_VERSION "1.0.7"
+#define DRV_VERSION "1.0.8"
#define PFX DRV_NAME ": "
#define PCI_DEVICE_ID_JMICRON_JMC250 0x0250
@@ -103,6 +103,37 @@ enum jme_spi_op_bits {
#define HALF_US 500 /* 500 ns */
#define JMESPIIOCTL SIOCDEVPRIVATE
+#define PCI_PRIV_PE1 0xE4
+
+enum pci_priv_pe1_bit_masks {
+ PE1_ASPMSUPRT = 0x00000003, /*
+ * RW:
+ * Aspm_support[1:0]
+ * (R/W Port of 5C[11:10])
+ */
+ PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */
+ PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */
+ PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */
+ PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
+ PE1_GPREG0 = 0x0000FF00, /*
+ * SRW:
+ * Cfg_gp_reg0
+ * [7:6] phy_giga BG control
+ * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
+ * [4:0] Reserved
+ */
+ PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */
+ PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */
+ PE1_REVID = 0xFF000000, /* RO: Rev ID */
+};
+
+enum pci_priv_pe1_values {
+ PE1_GPREG0_ENBG = 0x00000000, /* en BG */
+ PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */
+ PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */
+ PE1_GPREG0_PDPCIEIDDQ = 0x0000C000, /* giga_PD + pcie_iddq */
+};
+
/*
* Dynamic(adaptive)/Static PCC values
*/
@@ -403,6 +434,7 @@ struct jme_adapter {
u32 reg_rxmcs;
u32 reg_ghc;
u32 reg_pmcs;
+ u32 reg_gpreg1;
u32 phylink;
u32 tx_ring_size;
u32 tx_ring_mask;
@@ -411,8 +443,10 @@ struct jme_adapter {
u32 rx_ring_mask;
u8 mrrs;
unsigned int fpgaver;
- unsigned int chiprev;
- u8 rev;
+ u8 chiprev;
+ u8 chip_main_rev;
+ u8 chip_sub_rev;
+ u8 pcirev;
u32 msg_enable;
struct ethtool_cmd old_ecmd;
unsigned int old_mtu;
@@ -497,6 +531,7 @@ enum jme_iomap_regs {
JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */
+ JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */
JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */
JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */
JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@ enum jme_txtrhd_shifts {
TXTRHD_TXRL_SHIFT = 0,
};
+enum jme_txtrhd_values {
+ TXTRHD_FULLDUPLEX = 0x00000000,
+ TXTRHD_HALFDUPLEX = TXTRHD_TXPEN |
+ ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+ TXTRHD_TXREN |
+ ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
+};
+
/*
* RX Control/Status Bits
*/
@@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x)
*/
enum jme_ghc_bit_mask {
GHC_SWRST = 0x40000000,
+ GHC_TO_CLK_SRC = 0x00C00000,
+ GHC_TXMAC_CLK_SRC = 0x00300000,
GHC_DPX = 0x00000040,
GHC_SPEED = 0x00000030,
GHC_LINK_POLL = 0x00000001,
@@ -833,6 +878,21 @@ enum jme_pmcs_bit_masks {
};
/*
+ * New PHY Power Control Register
+ */
+enum jme_phy_pwr_bit_masks {
+ PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
+ PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
+ PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
+ PHY_PWR_CLKSEL = 0x08000000, /*
+ * XTL_OUT Clock select
+ * (an internal free-running clock)
+ * 0: xtl_out = phy_giga.A_XTL25_O
+ * 1: xtl_out = phy_giga.PD_OSC
+ */
+};
+
+/*
* Giga PHY Status Registers
*/
enum jme_phy_link_bit_mask {
@@ -942,18 +1002,17 @@ enum jme_gpreg0_vals {
/*
* General Purpose REG-1
- * Note: All theses bits defined here are for
- * Chip mode revision 0x11 only
*/
-enum jme_gpreg1_masks {
+enum jme_gpreg1_bit_masks {
+ GPREG1_RXCLKOFF = 0x04000000,
+ GPREG1_PCREQN = 0x00020000,
+ GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */
+ GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */
GPREG1_INTRDELAYUNIT = 0x00000018,
GPREG1_INTRDELAYENABLE = 0x00000007,
};
enum jme_gpreg1_vals {
- GPREG1_RSSPATCH = 0x00000040,
- GPREG1_HALFMODEPATCH = 0x00000020,
-
GPREG1_INTDLYUNIT_16NS = 0x00000000,
GPREG1_INTDLYUNIT_256NS = 0x00000008,
GPREG1_INTDLYUNIT_1US = 0x00000010,
@@ -967,7 +1026,7 @@ enum jme_gpreg1_vals {
GPREG1_INTDLYEN_6U = 0x00000006,
GPREG1_INTDLYEN_7U = 0x00000007,
- GPREG1_DEFAULT = 0x00000000,
+ GPREG1_DEFAULT = GPREG1_PCREQN,
};
/*
@@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals {
/*
* Workaround
*/
-static inline int is_buggy250(unsigned short device, unsigned int chiprev)
+static inline int is_buggy250(unsigned short device, u8 chiprev)
{
return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
}
+static inline int new_phy_power_ctrl(u8 chip_main_rev)
+{
+ return chip_main_rev >= 5;
+}
+
/*
* Function prototypes
*/
static int jme_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd);
+static void jme_set_unicastaddr(struct net_device *netdev);
static void jme_set_multi(struct net_device *netdev);
#endif
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2d9663a..ea0dc45 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev)
static const struct ethtool_ops loopback_ethtool_ops = {
.get_link = always_on,
- .set_tso = ethtool_op_set_tso,
- .get_tx_csum = always_on,
- .get_sg = always_on,
- .get_rx_csum = always_on,
};
static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev)
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
- | NETIF_F_TSO
+ | NETIF_F_ALL_TSO
+ | NETIF_F_UFO
| NETIF_F_NO_CSUM
+ | NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
| NETIF_F_NETNS_LOCAL;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621..2300e45 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -39,7 +39,7 @@ struct macvtap_queue {
struct socket sock;
struct socket_wq wq;
int vnet_hdr_sz;
- struct macvlan_dev *vlan;
+ struct macvlan_dev __rcu *vlan;
struct file *file;
unsigned int flags;
};
@@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
struct macvlan_dev *vlan;
spin_lock(&macvtap_lock);
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_protected(q->vlan,
+ lockdep_is_held(&macvtap_lock));
if (vlan) {
int index = get_slot(vlan, q);
@@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev)
/* macvtap_put_queue can free some slots, so go through all slots */
spin_lock(&macvtap_lock);
for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
- q = rcu_dereference(vlan->taps[i]);
+ q = rcu_dereference_protected(vlan->taps[i],
+ lockdep_is_held(&macvtap_lock));
if (q) {
qlist[j++] = q;
rcu_assign_pointer(vlan->taps[i], NULL);
@@ -569,7 +571,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
}
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
macvlan_start_xmit(skb, vlan->dev);
else
@@ -583,7 +585,7 @@ err_kfree:
err:
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
vlan->dev->stats.tx_dropped++;
rcu_read_unlock_bh();
@@ -631,7 +633,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
macvlan_count_rx(vlan, len, ret == 0, 0);
rcu_read_unlock_bh();
@@ -727,7 +729,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
case TUNGETIFF:
rcu_read_lock_bh();
- vlan = rcu_dereference(q->vlan);
+ vlan = rcu_dereference_bh(q->vlan);
if (vlan)
dev_hold(vlan->dev);
rcu_read_unlock_bh();
@@ -736,7 +738,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return -ENOLINK;
ret = 0;
- if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+ if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
put_user(q->flags, &ifr->ifr_flags))
ret = -EFAULT;
dev_put(vlan->dev);
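The macvtap changes are largely sparse-checkable RCU hygiene: the shared pointer gains a __rcu annotation, writers holding the spinlock use rcu_dereference_protected() with a lockdep expression, and the BH read side uses rcu_dereference_bh() to pair with rcu_read_lock_bh(). A minimal sketch of the pattern (struct and function names illustrative):

struct item;

struct holder {
	struct item __rcu *ptr;
};

static DEFINE_SPINLOCK(holder_lock);

/* returns the old item; the caller frees it after a grace period */
static struct item *holder_replace(struct holder *h, struct item *newp)
{
	struct item *old;

	spin_lock(&holder_lock);
	/* safe: we hold the lock that serializes writers */
	old = rcu_dereference_protected(h->ptr,
					lockdep_is_held(&holder_lock));
	rcu_assign_pointer(h->ptr, newp);
	spin_unlock(&holder_lock);

	return old;
}

static struct item *holder_get_bh(struct holder *h)
{
	/* caller must be inside rcu_read_lock_bh()/rcu_read_unlock_bh() */
	return rcu_dereference_bh(h->ptr);
}

The TUNGETIFF hunk is the same discipline applied to a use-after-unlock: the name is copied from the vlan pointer fetched (and dev_hold'd) under the BH read lock, not re-read from q->vlan afterwards.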
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ea5cfe2..a7f2eed 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -253,7 +253,7 @@ struct myri10ge_priv {
unsigned long serial_number;
int vendor_specific_offset;
int fw_multicast_support;
- unsigned long features;
+ u32 features;
u32 max_tso6;
u32 read_dma;
u32 write_dma;
@@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
- unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
+ u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
if (tso_enabled)
netdev->features |= flags;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 35fda5a..392a6c4 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -77,7 +77,6 @@ config NATIONAL_PHY
Currently supports the DP83865 PHY.
config STE10XP
- depends on PHYLIB
tristate "Driver for STMicroelectronics STe10Xp PHYs"
---help---
This is the driver for the STe100p and STe101p PHYs.
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0fd1678..590f902 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,13 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
-
-#define PHY_ID_KSZ9021 0x00221611
-#define PHY_ID_KS8737 0x00221720
-#define PHY_ID_KS8041 0x00221510
-#define PHY_ID_KS8051 0x00221550
-/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
-#define PHY_ID_KS8001 0x0022161A
+#include <linux/micrel_phy.h>
/* general Interrupt control/status reg in vendor specific block. */
#define MII_KSZPHY_INTCS 0x1B
@@ -46,6 +40,7 @@
#define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9)
#define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14)
#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
+#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
static int kszphy_ack_interrupt(struct phy_device *phydev)
{
@@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0;
}
+static int ks8051_config_init(struct phy_device *phydev)
+{
+ int regval;
+
+ if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+ regval = phy_read(phydev, MII_KSZPHY_CTRL);
+ regval |= KSZ8051_RMII_50MHZ_CLK;
+ phy_write(phydev, MII_KSZPHY_CTRL, regval);
+ }
+
+ return 0;
+}
+
static struct phy_driver ks8737_driver = {
.phy_id = PHY_ID_KS8737,
.phy_id_mask = 0x00fffff0,
@@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ks8051_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
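ks8051_config_init() keys off phydev->dev_flags, which phylib copies from the flags argument the MAC driver passes when attaching the PHY. A hedged sketch of a board opting into 50 MHz RMII clock mode (the bus id and adjust-link handler are illustrative):

	phydev = phy_connect(netdev, "0:01", &mac_adjust_link,
			     MICREL_PHY_50MHZ_CLK, PHY_INTERFACE_MODE_RMII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);
	/* ks8051_config_init() will now set KSZ8051_RMII_50MHZ_CLK */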
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index c7a6c44..9f6d670 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ppp_release(NULL, file);
err = 0;
} else
- printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
- atomic_long_read(&file->f_count));
+ pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+ atomic_long_read(&file->f_count));
mutex_unlock(&ppp_mutex);
return err;
}
@@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (pf->kind != INTERFACE) {
/* can't happen */
- printk(KERN_ERR "PPP: not interface or channel??\n");
+ pr_err("PPP: not interface or channel??\n");
return -EINVAL;
}
@@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
vj = slhc_init(val2+1, val+1);
if (!vj) {
- printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
+ netdev_err(ppp->dev,
+ "PPP: no memory (VJ compressor)\n");
err = -ENOMEM;
break;
}
@@ -898,17 +899,17 @@ static int __init ppp_init(void)
{
int err;
- printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
+ pr_info("PPP generic driver version " PPP_VERSION "\n");
err = register_pernet_device(&ppp_net_ops);
if (err) {
- printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
+ pr_err("failed to register PPP pernet device (%d)\n", err);
goto out;
}
err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
if (err) {
- printk(KERN_ERR "failed to register PPP device (%d)\n", err);
+ pr_err("failed to register PPP device (%d)\n", err);
goto out_net;
}
@@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
if (!new_skb) {
if (net_ratelimit())
- printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+ netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
return NULL;
}
if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
* the same number.
*/
if (net_ratelimit())
- printk(KERN_ERR "ppp: compressor dropped pkt\n");
+ netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
kfree_skb(skb);
kfree_skb(new_skb);
new_skb = NULL;
@@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
if (ppp->pass_filter &&
sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
- printk(KERN_DEBUG "PPP: outbound frame not passed\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: outbound frame "
+ "not passed\n");
kfree_skb(skb);
return;
}
@@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
GFP_ATOMIC);
if (!new_skb) {
- printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
+ netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
goto drop;
}
skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
proto != PPP_LCP && proto != PPP_CCP) {
if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
if (net_ratelimit())
- printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
+ netdev_err(ppp->dev,
+ "ppp: compression required but "
+ "down - pkt dropped.\n");
goto drop;
}
skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
noskb:
spin_unlock_bh(&pch->downl);
if (ppp->debug & 1)
- printk(KERN_ERR "PPP: no memory (fragment)\n");
+ netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq;
return 1; /* abandon the frame */
@@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* copy to a new sk_buff with more tailroom */
ns = dev_alloc_skb(skb->len + 128);
if (!ns) {
- printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
+ netdev_err(ppp->dev, "PPP: no memory "
+ "(VJ decomp)\n");
goto err;
}
skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
if (len <= 0) {
- printk(KERN_DEBUG "PPP: VJ decompression error\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: VJ decompression error\n");
goto err;
}
len += 2;
@@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
goto err;
if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
- printk(KERN_ERR "PPP: VJ uncompressed error\n");
+ netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
goto err;
}
proto = PPP_IP;
@@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
if (ppp->pass_filter &&
sk_run_filter(skb, ppp->pass_filter) == 0) {
if (ppp->debug & 1)
- printk(KERN_DEBUG "PPP: inbound frame "
- "not passed\n");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: inbound frame "
+ "not passed\n");
kfree_skb(skb);
return;
}
@@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
ns = dev_alloc_skb(obuff_size);
if (!ns) {
- printk(KERN_ERR "ppp_decompress_frame: no memory\n");
+ netdev_err(ppp->dev, "ppp_decompress_frame: "
+ "no memory\n");
goto err;
}
/* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
u32 seq = ppp->nextseq;
u32 minseq = ppp->minseq;
struct sk_buff_head *list = &ppp->mrq;
- struct sk_buff *p, *next;
+ struct sk_buff *p, *tmp;
struct sk_buff *head, *tail;
struct sk_buff *skb = NULL;
int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp)
return NULL;
head = list->next;
tail = NULL;
- for (p = head; p != (struct sk_buff *) list; p = next) {
- next = p->next;
+ skb_queue_walk_safe(list, p, tmp) {
+ again:
if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
/* this can't happen, anyway ignore the skb */
- printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
- PPP_MP_CB(p)->sequence, seq);
- head = next;
+ netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
+ "seq %u < %u\n",
+ PPP_MP_CB(p)->sequence, seq);
+ __skb_unlink(p, list);
+ kfree_skb(p);
continue;
}
if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp)
lost = 1;
seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
minseq + 1: PPP_MP_CB(p)->sequence;
- next = p;
- continue;
+ goto again;
}
/*
@@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp)
(PPP_MP_CB(head)->BEbits & B)) {
if (len > ppp->mrru + 2) {
++ppp->dev->stats.rx_length_errors;
- printk(KERN_DEBUG "PPP: reconstructed packet"
- " is too long (%d)\n", len);
- } else if (p == head) {
- /* fragment is complete packet - reuse skb */
- tail = p;
- skb = skb_get(p);
- break;
- } else if ((skb = dev_alloc_skb(len)) == NULL) {
- ++ppp->dev->stats.rx_missed_errors;
- printk(KERN_DEBUG "PPP: no memory for "
- "reconstructed packet");
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "PPP: reconstructed packet"
+ " is too long (%d)\n", len);
} else {
tail = p;
break;
@@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
* and we haven't found a complete valid packet yet,
* we can discard up to and including this fragment.
*/
- if (PPP_MP_CB(p)->BEbits & E)
- head = next;
+ if (PPP_MP_CB(p)->BEbits & E) {
+ struct sk_buff *tmp2;
+ skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+ __skb_unlink(p, list);
+ kfree_skb(p);
+ }
+ head = skb_peek(list);
+ if (!head)
+ break;
+ }
++seq;
}
@@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp)
signal a receive error. */
if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
if (ppp->debug & 1)
- printk(KERN_DEBUG " missed pkts %u..%u\n",
- ppp->nextseq,
- PPP_MP_CB(head)->sequence-1);
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ " missed pkts %u..%u\n",
+ ppp->nextseq,
+ PPP_MP_CB(head)->sequence-1);
++ppp->dev->stats.rx_dropped;
ppp_receive_error(ppp);
}
- if (head != tail)
- /* copy to a single skb */
- for (p = head; p != tail->next; p = p->next)
- skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
- ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
- head = tail->next;
- }
+ skb = head;
+ if (head != tail) {
+ struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
+ p = skb_queue_next(list, head);
+ __skb_unlink(skb, list);
+ skb_queue_walk_from_safe(list, p, tmp) {
+ __skb_unlink(p, list);
+ *fragpp = p;
+ p->next = NULL;
+ fragpp = &p->next;
+
+ skb->len += p->len;
+ skb->data_len += p->len;
+ skb->truesize += p->len;
+
+ if (p == tail)
+ break;
+ }
+ } else {
+ __skb_unlink(skb, list);
+ }
- /* Discard all the skbuffs that we have copied the data out of
- or that we can't use. */
- while ((p = list->next) != head) {
- __skb_unlink(p, list);
- kfree_skb(p);
+ ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
}
return skb;
@@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
ret = register_netdev(dev);
if (ret != 0) {
unit_put(&pn->units_idr, unit);
- printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
- dev->name, ret);
+ netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
+ dev->name, ret);
goto out2;
}
@@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp)
if (!ppp->file.dead || ppp->n_channels) {
/* "can't happen" */
- printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
- "n_channels=%d !\n", ppp, ppp->file.dead,
- ppp->n_channels);
+ netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
+ "but dead=%d n_channels=%d !\n",
+ ppp, ppp->file.dead, ppp->n_channels);
return;
}
@@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch)
if (!pch->file.dead) {
/* "can't happen" */
- printk(KERN_ERR "ppp: destroying undead channel %p !\n",
- pch);
+ pr_err("ppp: destroying undead channel %p !\n", pch);
return;
}
skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void)
{
/* should never happen */
if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
- printk(KERN_ERR "PPP: removing module but units remain!\n");
+ pr_err("PPP: removing module but units remain!\n");
unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n)
again:
if (!idr_pre_get(p, GFP_KERNEL)) {
- printk(KERN_ERR "PPP: No free memory for idr\n");
+ pr_err("PPP: No free memory for idr\n");
return -ENOMEM;
}
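ppp_mp_reconstruct() no longer copies every fragment into one freshly allocated skb. The head fragment is kept as the packet and the remaining fragments are chained onto it zero-copy through skb_shinfo()->frag_list, with len, data_len and truesize updated to account for the appended data. A condensed sketch of the chaining step (the helper name is illustrative; the patch keeps a running tail pointer instead of re-walking the chain):

static void mp_append_frag(struct sk_buff *head, struct sk_buff *p)
{
	struct sk_buff **fragpp = &skb_shinfo(head)->frag_list;

	while (*fragpp)				/* find the chain tail */
		fragpp = &(*fragpp)->next;

	*fragpp = p;				/* link the fragment in */
	p->next = NULL;

	head->len      += p->len;		/* head now covers p's data */
	head->data_len += p->len;
	head->truesize += p->len;
}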
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 002bac7..d4e0425 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -673,7 +673,7 @@ static void efx_fini_channels(struct efx_nic *efx)
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue);
efx_fini_eventq(channel);
}
@@ -689,7 +689,7 @@ static void efx_remove_channel(struct efx_channel *channel)
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue);
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
}
@@ -1271,21 +1271,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
static void efx_set_channels(struct efx_nic *efx)
{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
efx->tx_channel_offset =
separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
- /* Channel pointers were set in efx_init_struct() but we now
- * need to clear them for TX queues in any RX-only channels. */
- efx_for_each_channel(channel, efx) {
- if (channel->channel - efx->tx_channel_offset >=
- efx->n_tx_channels) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- tx_queue->channel = NULL;
- }
- }
}
static int efx_probe_nic(struct efx_nic *efx)
@@ -1531,9 +1518,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
efx->irq_rx_adaptive = rx_adaptive;
efx->irq_rx_moderation = rx_ticks;
efx_for_each_channel(channel, efx) {
- if (efx_channel_get_rx_queue(channel))
+ if (efx_channel_has_rx_queue(channel))
channel->irq_moderation = rx_ticks;
- else if (efx_channel_get_tx_queue(channel, 0))
+ else if (efx_channel_has_tx_queues(channel))
channel->irq_moderation = tx_ticks;
}
}
@@ -1849,6 +1836,7 @@ static const struct net_device_ops efx_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
+ .ndo_setup_tc = efx_setup_tc,
};
static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1898,8 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
struct efx_tx_queue *tx_queue;
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->core_txq = netdev_get_tx_queue(
- efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
- }
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_init_tx_queue_core_txq(tx_queue);
}
/* Always start with carrier off; PHY events will detect the link */
@@ -2401,7 +2387,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
int i, rc;
/* Allocate and initialise a struct net_device and struct efx_nic */
- net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+ net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+ EFX_MAX_RX_QUEUES);
if (!net_dev)
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d43a7e5..0cb198a 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -29,6 +29,7 @@
extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
/* RX */
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19..272cfe7 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -502,7 +502,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
- unsigned long features;
+ u32 features;
features = NETIF_F_TSO;
if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +519,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
{
struct efx_nic *efx = netdev_priv(net_dev);
- unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
+ u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
if (enable)
net_dev->features |= features;
@@ -631,7 +631,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
/* Find lowest IRQ moderation across all used TX queues */
coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
efx_for_each_channel(channel, efx) {
- if (!efx_channel_get_tx_queue(channel, 0))
+ if (!efx_channel_has_tx_queues(channel))
continue;
if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
if (channel->channel < efx->n_rx_channels)
@@ -676,8 +676,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
/* If the channel is shared only allow RX parameters to be set */
efx_for_each_channel(channel, efx) {
- if (efx_channel_get_rx_queue(channel) &&
- efx_channel_get_tx_queue(channel, 0) &&
+ if (efx_channel_has_rx_queue(channel) &&
+ efx_channel_has_tx_queues(channel) &&
tx_usecs) {
netif_err(efx, drv, efx->net_dev, "Channel is shared. "
"Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 28df866..96e22ad 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -63,10 +63,12 @@
/* Checksum generation is a per-queue option in hardware, so each
* queue visible to the networking core is backed by two hardware TX
* queues. */
-#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD 1
-#define EFX_TXQ_TYPES 2
-#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC 2
+#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
+#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
+#define EFX_TXQ_TYPES 4
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
/**
* struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
* @buffer: The software buffer ring
* @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
* @flushed: Used when handling queue flushing
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
struct efx_tx_buffer *buffer;
struct efx_special_buffer txd;
unsigned int ptr_mask;
+ bool initialised;
enum efx_flush_state flushed;
/* Members used mainly on the completion path */
@@ -377,7 +381,7 @@ struct efx_channel {
bool rx_pkt_csummed;
struct efx_rx_queue rx_queue;
- struct efx_tx_queue tx_queue[2];
+ struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};
enum efx_led_mode {
@@ -906,7 +910,7 @@ struct efx_nic_type {
unsigned int phys_addr_channels;
unsigned int tx_dc_base;
unsigned int rx_dc_base;
- unsigned long offload_features;
+ u32 offload_features;
u32 reset_world_flags;
};
@@ -938,18 +942,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+ return channel->channel - channel->efx->tx_channel_offset <
+ channel->efx->n_tx_channels;
+}
+
static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
- struct efx_tx_queue *tx_queue = channel->tx_queue;
- EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
- return tx_queue->channel ? tx_queue + type : NULL;
+ EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+ type >= EFX_TXQ_TYPES);
+ return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+ return !(tx_queue->efx->net_dev->num_tc < 2 &&
+ tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
}
/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
- for (_tx_queue = efx_channel_get_tx_queue(channel, 0); \
- _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+ if (!efx_channel_has_tx_queues(_channel)) \
+ ; \
+ else \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+ efx_tx_queue_used(_tx_queue); \
+ _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
+ for (_tx_queue = (_channel)->tx_queue; \
+ _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
_tx_queue++)
static inline struct efx_rx_queue *
@@ -959,18 +985,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
return &efx->channel[index]->rx_queue;
}
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+ return channel->channel < channel->efx->n_rx_channels;
+}
+
static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
- return channel->channel < channel->efx->n_rx_channels ?
- &channel->rx_queue : NULL;
+ EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+ return &channel->rx_queue;
}
/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
- for (_rx_queue = efx_channel_get_rx_queue(channel); \
- _rx_queue; \
- _rx_queue = NULL)
+ if (!efx_channel_has_rx_queue(_channel)) \
+ ; \
+ else \
+ for (_rx_queue = &(_channel)->rx_queue; \
+ _rx_queue; \
+ _rx_queue = NULL)
static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
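
The net_driver.h hunk above turns the TX queue type into a pair of bit
flags, so each channel's four queues cover every combination of checksum
offload and priority. A minimal sketch of how a type value is composed,
assuming the flag values from the hunk (needs_csum and high_priority are
illustrative names, not driver fields):

    /* type spans 0..EFX_TXQ_TYPES-1 (== 4):
     *   0                                            no offload, normal
     *   EFX_TXQ_TYPE_OFFLOAD                         checksum offload (bit 0)
     *   EFX_TXQ_TYPE_HIGHPRI                         high priority    (bit 1)
     *   EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI  both
     */
    unsigned type = 0;
    if (needs_csum)
            type |= EFX_TXQ_TYPE_OFFLOAD;
    if (high_priority)
            type |= EFX_TXQ_TYPE_HIGHPRI;
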
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da38659..1d0b8b6 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -445,8 +445,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
- efx_oword_t tx_desc_ptr;
struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
tx_queue->flushed = FLUSH_NONE;
@@ -454,7 +454,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
efx_init_special_buffer(efx, &tx_queue->txd);
/* Push TX descriptor ring to card */
- EFX_POPULATE_OWORD_10(tx_desc_ptr,
+ EFX_POPULATE_OWORD_10(reg,
FRF_AZ_TX_DESCQ_EN, 1,
FRF_AZ_TX_ISCSI_DDIG_EN, 0,
FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +470,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
- EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
}
- efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
- efx_oword_t reg;
-
/* Only 128 bits in this register */
BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
@@ -491,6 +489,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
set_bit_le(tx_queue->queue, (void *)&reg);
efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
}
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
}
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1246,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
/* Flush all tx queues in parallel */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_flush_tx_queue(tx_queue);
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised)
+ efx_flush_tx_queue(tx_queue);
+ }
}
/* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1272,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
++rx_pending;
}
}
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->flushed != FLUSH_DONE)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
++tx_pending;
}
}
@@ -1278,8 +1289,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
/* Mark the queues as all flushed. We're going to return failure
* leading to a reset, or fake up success anyway */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- if (tx_queue->flushed != FLUSH_DONE)
+ efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+ if (tx_queue->initialised &&
+ tx_queue->flushed != FLUSH_DONE)
netif_err(efx, hw, efx->net_dev,
"tx queue %d flush command timed out\n",
tx_queue->queue);
@@ -1682,6 +1694,19 @@ void efx_nic_init_common(struct efx_nic *efx)
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
}
/* Register dump */
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed..8227de6 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -2907,6 +2907,12 @@
#define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
/* DRIVER_EV */
/* Sub-fields of an RX flush completion event */
#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
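
Read together with the nic.c hunk above: a high-priority queue is written
to the pace table with FFE_BZ_TX_PACE_OFF (0, pacing disabled), while every
other queue gets FFE_BZ_TX_PACE_RESERVED (21), which the comment notes puts
it in the fast bin with an effective pace of zero. A hedged restatement of
the per-queue selection:

    /* Sketch only; mirrors the FR_BZ_TX_PACE_TBL write in nic.c above */
    unsigned pace = (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
                    FFE_BZ_TX_PACE_OFF : FFE_BZ_TX_PACE_RESERVED;
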
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99..f936892 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
goto out;
}
- /* Test both types of TX queue */
+ /* Test all enabled types of TX queue */
efx_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue &
EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2f5e9da..1a51653 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_tx_queue *tx_queue;
+ unsigned index, type;
if (unlikely(efx->port_inhibited))
return NETDEV_TX_BUSY;
- tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
- skb->ip_summed == CHECKSUM_PARTIAL ?
- EFX_TXQ_TYPE_OFFLOAD : 0);
+ index = skb_get_queue_mapping(skb);
+ type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+ if (index >= efx->n_tx_channels) {
+ index -= efx->n_tx_channels;
+ type |= EFX_TXQ_TYPE_HIGHPRI;
+ }
+ tx_queue = efx_get_tx_queue(efx, index, type);
return efx_enqueue_skb(tx_queue, skb);
}
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+
+ /* Must be inverse of queue lookup in efx_hard_start_xmit() */
+ tx_queue->core_txq =
+ netdev_get_tx_queue(efx->net_dev,
+ tx_queue->queue / EFX_TXQ_TYPES +
+ ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ unsigned tc;
+ int rc;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+ return -EINVAL;
+
+ if (num_tc == net_dev->num_tc)
+ return 0;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+ net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+ }
+
+ if (num_tc > net_dev->num_tc) {
+ /* Initialise high-priority queues as necessary */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_possible_channel_tx_queue(tx_queue,
+ channel) {
+ if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+ continue;
+ if (!tx_queue->buffer) {
+ rc = efx_probe_tx_queue(tx_queue);
+ if (rc)
+ return rc;
+ }
+ if (!tx_queue->initialised)
+ efx_init_tx_queue(tx_queue);
+ efx_init_tx_queue_core_txq(tx_queue);
+ }
+ }
+ } else {
+ /* Reduce number of classes before number of queues */
+ net_dev->num_tc = num_tc;
+ }
+
+ rc = netif_set_real_num_tx_queues(net_dev,
+ max_t(int, num_tc, 1) *
+ efx->n_tx_channels);
+ if (rc)
+ return rc;
+
+ /* Do not destroy high-priority queues when they become
+ * unused. We would have to flush them first, and it is
+ * fairly difficult to flush a subset of TX queues. Leave
+ * it to efx_fini_channels().
+ */
+
+ net_dev->num_tc = num_tc;
+ return 0;
+}
+
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
+
+ tx_queue->initialised = true;
}
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
+ if (!tx_queue->initialised)
+ return;
+
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
+ tx_queue->initialised = false;
+
/* Flush TX queue, remove descriptor ring */
efx_nic_fini_tx(tx_queue);
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
+ if (!tx_queue->buffer)
+ return;
+
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"destroying TX queue %d\n", tx_queue->queue);
efx_nic_remove_tx(tx_queue);
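
The tx.c changes define a core-to-hardware queue mapping and its declared
inverse. As a worked example, assume efx->n_tx_channels == 4 and
EFX_TXQ_TYPES == 4 (illustrative counts): core queue 5 maps to channel 1
with EFX_TXQ_TYPE_HIGHPRI set, and efx_init_tx_queue_core_txq() recovers
5 as queue / EFX_TXQ_TYPES + n_tx_channels. Likewise, efx_setup_tc() with
num_tc == 2 makes tc 0 cover core queues 0..3 and tc 1 cover 4..7, so
netif_set_real_num_tx_queues() is called with 8. A self-contained sketch
of both directions (the SK_* constants stand in for the driver values):

    #define SK_TXQ_TYPES   4   /* stands in for EFX_TXQ_TYPES */
    #define SK_HIGHPRI     2   /* stands in for EFX_TXQ_TYPE_HIGHPRI */
    #define SK_CHANNELS    4   /* stands in for efx->n_tx_channels */

    /* Core queue index -> (channel, type), as in efx_hard_start_xmit() */
    static unsigned core_to_channel(unsigned core, unsigned *type)
    {
            *type = 0;
            if (core >= SK_CHANNELS) {      /* second half: high priority */
                    core -= SK_CHANNELS;
                    *type |= SK_HIGHPRI;
            }
            return core;
    }

    /* Hardware queue number -> core queue index, the inverse */
    static unsigned hw_to_core(unsigned hw_queue)
    {
            return hw_queue / SK_TXQ_TYPES +
                   ((hw_queue & SK_HIGHPRI) ? SK_CHANNELS : 0);
    }
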
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 726df61..43654a3 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/workqueue.h>
+#include <linux/of.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id smc91x_match[] = {
+ { .compatible = "smsc,lan91c94", },
+ { .compatible = "smsc,lan91c111", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#endif
+
static struct dev_pm_ops smc_drv_pm_ops = {
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
@@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = {
.name = CARDNAME,
.owner = THIS_MODULE,
.pm = &smc_drv_pm_ops,
+#ifdef CONFIG_OF
+ .of_match_table = smc91x_match,
+#endif
},
};
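
With the match table registered, the platform core can bind this driver to
device-tree nodes whose compatible string is one of the two listed. A sketch
of how a probe path could inspect the match; smc_probe_example is an
illustrative name, not part of the patch:

    #include <linux/of_device.h>

    static int smc_probe_example(struct platform_device *pdev)
    {
            const struct of_device_id *match;

            /* Returns the matching table entry, or NULL if the device
             * was not bound through the device tree. */
            match = of_match_device(smc91x_match, &pdev->dev);
            if (match)
                    dev_info(&pdev->dev, "bound via DT (%s)\n",
                             match->compatible);
            return 0;
    }
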
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1c5408f..c1a3448 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
if (txmac_stat & MAC_TXSTAT_URUN) {
netdev_err(dev, "TX MAC xmit underrun\n");
- gp->net_stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
}
if (txmac_stat & MAC_TXSTAT_MPE) {
netdev_err(dev, "TX MAC max packet size error\n");
- gp->net_stats.tx_errors++;
+ dev->stats.tx_errors++;
}
/* The rest are all cases of one of the 16-bit TX
* counters expiring.
*/
if (txmac_stat & MAC_TXSTAT_NCE)
- gp->net_stats.collisions += 0x10000;
+ dev->stats.collisions += 0x10000;
if (txmac_stat & MAC_TXSTAT_ECE) {
- gp->net_stats.tx_aborted_errors += 0x10000;
- gp->net_stats.collisions += 0x10000;
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
}
if (txmac_stat & MAC_TXSTAT_LCE) {
- gp->net_stats.tx_aborted_errors += 0x10000;
- gp->net_stats.collisions += 0x10000;
+ dev->stats.tx_aborted_errors += 0x10000;
+ dev->stats.collisions += 0x10000;
}
/* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
u32 smac = readl(gp->regs + MAC_SMACHINE);
netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
- gp->net_stats.rx_over_errors++;
- gp->net_stats.rx_fifo_errors++;
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_fifo_errors++;
ret = gem_rxmac_reset(gp);
}
if (rxmac_stat & MAC_RXSTAT_ACE)
- gp->net_stats.rx_frame_errors += 0x10000;
+ dev->stats.rx_frame_errors += 0x10000;
if (rxmac_stat & MAC_RXSTAT_CCE)
- gp->net_stats.rx_crc_errors += 0x10000;
+ dev->stats.rx_crc_errors += 0x10000;
if (rxmac_stat & MAC_RXSTAT_LCE)
- gp->net_stats.rx_length_errors += 0x10000;
+ dev->stats.rx_length_errors += 0x10000;
/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
* events.
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
if (netif_msg_rx_err(gp))
printk(KERN_DEBUG "%s: no buffer for rx frame\n",
gp->dev->name);
- gp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
}
if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat
if (netif_msg_rx_err(gp))
printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
gp->dev->name);
- gp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
goto do_reset;
}
@@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
break;
}
gp->tx_skbs[entry] = NULL;
- gp->net_stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
entry = NEXT_TX(entry);
}
- gp->net_stats.tx_packets++;
+ dev->stats.tx_packets++;
dev_kfree_skb_irq(skb);
}
gp->tx_old = entry;
@@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
static int gem_rx(struct gem *gp, int work_to_do)
{
+ struct net_device *dev = gp->dev;
int entry, drops, work_done = 0;
u32 done;
__sum16 csum;
@@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
len = (status & RXDCTRL_BUFSZ) >> 16;
if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
- gp->net_stats.rx_errors++;
+ dev->stats.rx_errors++;
if (len < ETH_ZLEN)
- gp->net_stats.rx_length_errors++;
+ dev->stats.rx_length_errors++;
if (len & RXDCTRL_BAD)
- gp->net_stats.rx_crc_errors++;
+ dev->stats.rx_crc_errors++;
/* We'll just return it to GEM. */
drop_it:
- gp->net_stats.rx_dropped++;
+ dev->stats.rx_dropped++;
goto next;
}
@@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do)
netif_receive_skb(skb);
- gp->net_stats.rx_packets++;
- gp->net_stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
next:
entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev)
static struct net_device_stats *gem_get_stats(struct net_device *dev)
{
struct gem *gp = netdev_priv(dev);
- struct net_device_stats *stats = &gp->net_stats;
spin_lock_irq(&gp->lock);
spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
* so we shield against this
*/
if (gp->running) {
- stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+ dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
writel(0, gp->regs + MAC_FCSERR);
- stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
+ dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
writel(0, gp->regs + MAC_AERR);
- stats->rx_length_errors += readl(gp->regs + MAC_LERR);
+ dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
writel(0, gp->regs + MAC_LERR);
- stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
- stats->collisions +=
+ dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+ dev->stats.collisions +=
(readl(gp->regs + MAC_ECOLL) +
readl(gp->regs + MAC_LCOLL));
writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
- return &gp->net_stats;
+ return &dev->stats;
}
static int gem_set_mac_address(struct net_device *dev, void *addr)
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 1990546..ede0178 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -994,7 +994,6 @@ struct gem {
u32 status;
struct napi_struct napi;
- struct net_device_stats net_stats;
int tx_fifo_sz;
int rx_fifo_sz;
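
The sungem conversion follows a common cleanup: struct net_device already
embeds a net_device_stats, so a driver-private copy is redundant. The
general shape, as a sketch (example_get_stats is an illustrative name):

    /* Counters are bumped directly on dev->stats in the hot paths; the
     * ndo_get_stats hook then only folds in hardware counters and
     * returns the embedded structure. */
    static struct net_device_stats *example_get_stats(struct net_device *dev)
    {
            /* ... read-and-clear hardware counters into dev->stats ... */
            return &dev->stats;
    }
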
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 06c0e503..6be4185 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2010 Broadcom Corporation.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -64,10 +64,10 @@
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 116
+#define TG3_MIN_NUM 117
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "December 3, 2010"
+#define DRV_MODULE_RELDATE "January 25, 2011"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -1776,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
tg3_phy_cl45_read(tp, MDIO_MMD_AN,
TG3_CL45_D7_EEERES_STAT, &val);
- if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
- val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+ switch (val) {
+ case TG3_CL45_D7_EEERES_STAT_LP_1000T:
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_5719:
+ case ASIC_REV_57765:
+ /* Enable SM_DSP clock and tx 6dB coding. */
+ val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+
+ /* Turn off SM_DSP clock. */
+ val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+ MII_TG3_AUXCTL_ACTL_TX_6DB;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+ }
+ /* Fallthrough */
+ case TG3_CL45_D7_EEERES_STAT_LP_100TX:
tp->setlpicnt = 2;
+ }
}
if (!tp->setlpicnt) {
@@ -2968,11 +2988,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
MII_TG3_AUXCTL_ACTL_TX_6DB;
tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
- !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
- tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
- val | MII_TG3_DSP_CH34TP2_HIBW01);
+ switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ case ASIC_REV_5717:
+ case ASIC_REV_57765:
+ if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+ tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+ MII_TG3_DSP_CH34TP2_HIBW01);
+ /* Fall through */
+ case ASIC_REV_5719:
+ val = MII_TG3_DSP_TAP26_ALNOKO |
+ MII_TG3_DSP_TAP26_RMRXSTO |
+ MII_TG3_DSP_TAP26_OPCSINPT;
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+ }
val = 0;
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -7801,7 +7829,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_CPMU_DBTMR1_LNKIDLE_2047US);
tw32_f(TG3_CPMU_EEE_DBTMR2,
- TG3_CPMU_DBTMR1_APE_TX_2047US |
+ TG3_CPMU_DBTMR2_APE_TX_2047US |
TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
@@ -8075,8 +8103,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+ !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
/* Setup replenish threshold. */
tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
@@ -8194,8 +8223,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
val = tr32(TG3_RDMA_RSRVCTRL_REG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
- val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
- val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
+ val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+ val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
}
tw32(TG3_RDMA_RSRVCTRL_REG,
val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
@@ -8317,7 +8350,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
+ if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+ tp->irq_cnt > 1) {
val = tr32(MSGINT_MODE);
val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
tw32(MSGINT_MODE, val);
@@ -9057,7 +9091,8 @@ static void tg3_ints_init(struct tg3 *tp)
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
u32 msi_mode = tr32(MSGINT_MODE);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+ tp->irq_cnt > 1)
msi_mode |= MSGINT_MODE_MULTIVEC_EN;
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
}
@@ -10833,13 +10868,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
if (loopback_mode == TG3_MAC_LOOPBACK) {
/* HW errata - mac loopback fails in some cases on 5780.
* Normal traffic and PHY loopback are not affected by
- * errata.
+ * errata. Also, the MAC loopback test is deprecated for
+ * all newer ASIC revisions.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+ (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
return 0;
- mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_PORT_INT_LPBACK;
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+ mac_mode |= MAC_MODE_PORT_INT_LPBACK;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
mac_mode |= MAC_MODE_LINK_POLARITY;
if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10861,7 +10899,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
tg3_writephy(tp, MII_BMCR, val);
udelay(40);
- mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+ mac_mode = tp->mac_mode &
+ ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
tg3_writephy(tp, MII_TG3_FET_PTEST,
MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10889,6 +10928,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
tw32(MAC_MODE, mac_mode);
+
+ /* Wait for link */
+ for (i = 0; i < 100; i++) {
+ if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+ break;
+ mdelay(1);
+ }
} else {
return -EINVAL;
}
@@ -10995,14 +11041,19 @@ out:
static int tg3_test_loopback(struct tg3 *tp)
{
int err = 0;
- u32 cpmuctrl = 0;
+ u32 eee_cap, cpmuctrl = 0;
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
+ eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
err = tg3_reset_hw(tp, 1);
- if (err)
- return TG3_LOOPBACK_FAILED;
+ if (err) {
+ err = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
/* Turn off gphy autopowerdown. */
if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11022,8 +11073,10 @@ static int tg3_test_loopback(struct tg3 *tp)
udelay(10);
}
- if (status != CPMU_MUTEX_GNT_DRIVER)
- return TG3_LOOPBACK_FAILED;
+ if (status != CPMU_MUTEX_GNT_DRIVER) {
+ err = TG3_LOOPBACK_FAILED;
+ goto done;
+ }
/* Turn off link-based power management. */
cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11052,6 +11105,9 @@ static int tg3_test_loopback(struct tg3 *tp)
if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
tg3_phy_toggle_apd(tp, true);
+done:
+ tp->phy_flags |= eee_cap;
+
return err;
}
@@ -12407,9 +12463,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
}
done:
- device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
- device_set_wakeup_enable(&tp->pdev->dev,
+ if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&tp->pdev->dev,
tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+ else
+ device_set_wakeup_capable(&tp->pdev->dev, false);
}
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
@@ -13262,7 +13320,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
/* Determine TSO capabilities */
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ ; /* Do nothing. HW bug. */
+ else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13313,7 +13373,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
}
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+ if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13331,42 +13392,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
tp->pcie_readrq = 4096;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
- u16 word;
-
- pci_read_config_word(tp->pdev,
- tp->pcie_cap + PCI_EXP_LNKSTA,
- &word);
- switch (word & PCI_EXP_LNKSTA_CLS) {
- case PCI_EXP_LNKSTA_CLS_2_5GB:
- word &= PCI_EXP_LNKSTA_NLW;
- word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
- switch (word) {
- case 2:
- tp->pcie_readrq = 2048;
- break;
- case 4:
- tp->pcie_readrq = 1024;
- break;
- }
- break;
-
- case PCI_EXP_LNKSTA_CLS_5_0GB:
- word &= PCI_EXP_LNKSTA_NLW;
- word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
- switch (word) {
- case 1:
- tp->pcie_readrq = 2048;
- break;
- case 2:
- tp->pcie_readrq = 1024;
- break;
- case 4:
- tp->pcie_readrq = 512;
- break;
- }
- }
- }
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tp->pcie_readrq = 2048;
pcie_set_readrq(tp->pdev, tp->pcie_readrq);
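
One subtlety in the tg3_test_loopback() change above: the EEE capability
bit is cleared for the duration of the test, so the early returns had to
become "goto done" to guarantee the bit is restored on every exit path.
The pattern reduced to its essentials, as a sketch (tg3_run_tests is an
illustrative stand-in for the body of the function):

    static int tg3_eee_guard_example(struct tg3 *tp)
    {
            u32 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; /* remember */
            int err;

            tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;  /* EEE off for the test */

            err = tg3_run_tests(tp);               /* failures land here too */

            tp->phy_flags |= eee_cap;              /* restored on every path */
            return err;
    }
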
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index f528243..73884b6 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2010 Broadcom Corporation.
+ * Copyright (C) 2007-2011 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -141,6 +141,7 @@
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
#define CHIPREV_ID_57765_A0 0x57785000
+#define CHIPREV_ID_5719_A0 0x05719000
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -1105,7 +1106,7 @@
#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
#define TG3_CPMU_EEE_DBTMR2 0x000036b8
-#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000
+#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000
#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
@@ -1333,6 +1334,10 @@
#define TG3_RDMA_RSRVCTRL_REG 0x00004900
#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000
#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
/* 0x4904 --> 0x4910 unused */
@@ -2108,6 +2113,10 @@
#define MII_TG3_DSP_TAP1 0x0001
#define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007
+#define MII_TG3_DSP_TAP26 0x001a
+#define MII_TG3_DSP_TAP26_ALNOKO 0x0001
+#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002
+#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004
#define MII_TG3_DSP_AADJ1CH0 0x001f
#define MII_TG3_DSP_CH34TP2 0x4022
#define MII_TG3_DSP_CH34TP2_HIBW01 0x0010
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f8e463c..e48a808 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -63,45 +63,45 @@
* - Other minor stuff
*
* v1.4 Feb 10, 2000 - Updated with more changes required after Dave's
- * network cleanup in 2.3.43pre7 (Tigran & myself)
- * - Minor stuff.
+ * network cleanup in 2.3.43pre7 (Tigran & myself)
+ * - Minor stuff.
*
- * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver
- * if no cable/link were present.
+ * v1.5 March 22, 2000 - Fixed another timer bug that would hang the
+ * driver if no cable/link were present.
* - Cosmetic changes.
* - TODO: Port completely to new PCI/DMA API
- * Auto-Neg fallback.
- *
- * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. Haven't
- * tested it though, as the kernel support is currently
- * broken (2.3.99p4p3).
- * - Updated tlan.txt accordingly.
- * - Adjusted minimum/maximum frame length.
- * - There is now a TLAN website up at
- * http://hp.sourceforge.net/
- *
- * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
- * reports PHY information when used with Donald
- * Beckers userspace MII diagnostics utility.
- *
- * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
- * - Added link information to Auto-Neg and forced
- * modes. When NIC operates with auto-neg the driver
- * will report Link speed & duplex modes as well as
- * link partner abilities. When forced link is used,
- * the driver will report status of the established
- * link.
- * Please read tlan.txt for additional information.
- * - Removed call to check_region(), and used
- * return value of request_region() instead.
+ * Auto-Neg fallback.
+ *
+ * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters.
+ * Haven't tested it though, as the kernel support
+ * is currently broken (2.3.99p4p3).
+ * - Updated tlan.txt accordingly.
+ * - Adjusted minimum/maximum frame length.
+ * - There is now a TLAN website up at
+ * http://hp.sourceforge.net/
+ *
+ * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now
+ * reports PHY information when used with Donald
+ * Becker's userspace MII diagnostics utility.
+ *
+ * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings.
+ * - Added link information to Auto-Neg and forced
+ * modes. When NIC operates with auto-neg the driver
+ * will report Link speed & duplex modes as well as
+ * link partner abilities. When forced link is used,
+ * the driver will report status of the established
+ * link.
+ * Please read tlan.txt for additional information.
+ * - Removed call to check_region(), and used
+ * return value of request_region() instead.
*
* v1.8a May 28, 2000 - Minor updates.
*
* v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues.
- * - Updated with timer fixes from Andrew Morton.
- * - Fixed module race in TLan_Open.
- * - Added routine to monitor PHY status.
- * - Added activity led support for Proliant devices.
+ * - Updated with timer fixes from Andrew Morton.
+ * - Fixed module race in TLan_Open.
+ * - Added routine to monitor PHY status.
+ * - Added activity led support for Proliant devices.
*
* v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers
* like the Compaq NetFlex3/E.
@@ -111,8 +111,8 @@
* hardware probe is done with kernel API and
* TLan_EisaProbe.
* - Adjusted debug information for probing.
- * - Fixed bug that would cause general debug information
- * to be printed after driver removal.
+ * - Fixed bug that would cause general debug
+ * information to be printed after driver removal.
* - Added transmit timeout handling.
* - Fixed OOM return values in tlan_probe.
* - Fixed possible mem leak in tlan_exit
@@ -136,8 +136,8 @@
*
* v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.)
*
- * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
- * when link can't be established.
+ * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues
+ * when link can't be established.
* - Added the bbuf option as a kernel parameter.
* - Fixed ioaddr probe bug.
* - Fixed stupid deadlock with MII interrupts.
@@ -147,28 +147,30 @@
* TLAN v1.0 silicon. This needs to be investigated
* further.
*
- * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per.
- * interrupt. Thanks goes to
- * Adam Keys <adam@ti.com>
- * Denis Beaudoin <dbeaudoin@ti.com>
- * for providing the patch.
- * - Fixed auto-neg output when using multiple
- * adapters.
- * - Converted to use new taskq interface.
+ * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per
+ * interrupt. Thanks go to
+ * Adam Keys <adam@ti.com>
+ * Denis Beaudoin <dbeaudoin@ti.com>
+ * for providing the patch.
+ * - Fixed auto-neg output when using multiple
+ * adapters.
+ * - Converted to use new taskq interface.
*
- * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
+ * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.)
*
* Samuel Chessman <chessman@tux.org> New Maintainer!
*
* v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
- * 10T half duplex no loopback
- * Thanks to Gunnar Eikman
+ * 10T half duplex no loopback
+ * Thanks to Gunnar Eikman
*
* Sakari Ailus <sakari.ailus@iki.fi>:
*
* v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
+ * v1.16 Jan 6 2011 - Make checkpatch.pl happy.
+ * v1.17 Jan 6 2011 - Add suspend/resume support.
*
- *******************************************************************************/
+ ******************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
@@ -185,13 +187,11 @@
#include "tlan.h"
-typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
-
/* For removing EISA devices */
-static struct net_device *TLan_Eisa_Devices;
+static struct net_device *tlan_eisa_devices;
-static int TLanDevicesInstalled;
+static int tlan_devices_installed;
/* Set speed, duplex and aui settings */
static int aui[MAX_TLAN_BOARDS];
@@ -202,7 +202,8 @@ module_param_array(aui, int, NULL, 0);
module_param_array(duplex, int, NULL, 0);
module_param_array(speed, int, NULL, 0);
MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
-MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(duplex,
+ "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
@@ -218,139 +219,144 @@ static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
-static const char TLanSignature[] = "TLAN";
-static const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
+static const char tlan_signature[] = "TLAN";
+static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
static int tlan_have_pci;
static int tlan_have_eisa;
-static const char *media[] = {
- "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
- "100baseTx-FD", "100baseT4", NULL
+static const char * const media[] = {
+ "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+ "100BaseTx-FD", "100BaseT4", NULL
};
static struct board {
- const char *deviceLabel;
- u32 flags;
- u16 addrOfs;
+ const char *device_label;
+ u32 flags;
+ u16 addr_ofs;
} board_info[] = {
{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10/100 TX PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/P",
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq Netelligent Integrated 10/100 TX UTP",
TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
- { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent Dual 10/100 TX PCI UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10/100 TX Embedded UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
- { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 },
- { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
+ { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+ { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
- { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
{ "Compaq NetFlex-3/E",
- TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
+ TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
- { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
};
static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
-static void TLan_EisaProbe( void );
-static void TLan_Eisa_Cleanup( void );
-static int TLan_Init( struct net_device * );
-static int TLan_Open( struct net_device *dev );
-static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *);
-static irqreturn_t TLan_HandleInterrupt( int, void *);
-static int TLan_Close( struct net_device *);
-static struct net_device_stats *TLan_GetStats( struct net_device *);
-static void TLan_SetMulticastList( struct net_device *);
-static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
-static int TLan_probe1( struct pci_dev *pdev, long ioaddr,
- int irq, int rev, const struct pci_device_id *ent);
-static void TLan_tx_timeout( struct net_device *dev);
-static void TLan_tx_timeout_work(struct work_struct *work);
-static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
-
-static u32 TLan_HandleTxEOF( struct net_device *, u16 );
-static u32 TLan_HandleStatOverflow( struct net_device *, u16 );
-static u32 TLan_HandleRxEOF( struct net_device *, u16 );
-static u32 TLan_HandleDummy( struct net_device *, u16 );
-static u32 TLan_HandleTxEOC( struct net_device *, u16 );
-static u32 TLan_HandleStatusCheck( struct net_device *, u16 );
-static u32 TLan_HandleRxEOC( struct net_device *, u16 );
-
-static void TLan_Timer( unsigned long );
-
-static void TLan_ResetLists( struct net_device * );
-static void TLan_FreeLists( struct net_device * );
-static void TLan_PrintDio( u16 );
-static void TLan_PrintList( TLanList *, char *, int );
-static void TLan_ReadAndClearStats( struct net_device *, int );
-static void TLan_ResetAdapter( struct net_device * );
-static void TLan_FinishReset( struct net_device * );
-static void TLan_SetMac( struct net_device *, int areg, char *mac );
-
-static void TLan_PhyPrint( struct net_device * );
-static void TLan_PhyDetect( struct net_device * );
-static void TLan_PhyPowerDown( struct net_device * );
-static void TLan_PhyPowerUp( struct net_device * );
-static void TLan_PhyReset( struct net_device * );
-static void TLan_PhyStartLink( struct net_device * );
-static void TLan_PhyFinishAutoNeg( struct net_device * );
+static void tlan_eisa_probe(void);
+static void tlan_eisa_cleanup(void);
+static int tlan_init(struct net_device *);
+static int tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int tlan_close(struct net_device *);
+static struct net_device_stats *tlan_get_stats(struct net_device *);
+static void tlan_set_multicast_list(struct net_device *);
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
+ int irq, int rev, const struct pci_device_id *ent);
+static void tlan_tx_timeout(struct net_device *dev);
+static void tlan_tx_timeout_work(struct work_struct *work);
+static int tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static u32 tlan_handle_tx_eof(struct net_device *, u16);
+static u32 tlan_handle_stat_overflow(struct net_device *, u16);
+static u32 tlan_handle_rx_eof(struct net_device *, u16);
+static u32 tlan_handle_dummy(struct net_device *, u16);
+static u32 tlan_handle_tx_eoc(struct net_device *, u16);
+static u32 tlan_handle_status_check(struct net_device *, u16);
+static u32 tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void tlan_timer(unsigned long);
+
+static void tlan_reset_lists(struct net_device *);
+static void tlan_free_lists(struct net_device *);
+static void tlan_print_dio(u16);
+static void tlan_print_list(struct tlan_list *, char *, int);
+static void tlan_read_and_clear_stats(struct net_device *, int);
+static void tlan_reset_adapter(struct net_device *);
+static void tlan_finish_reset(struct net_device *);
+static void tlan_set_mac(struct net_device *, int areg, char *mac);
+
+static void tlan_phy_print(struct net_device *);
+static void tlan_phy_detect(struct net_device *);
+static void tlan_phy_power_down(struct net_device *);
+static void tlan_phy_power_up(struct net_device *);
+static void tlan_phy_reset(struct net_device *);
+static void tlan_phy_start_link(struct net_device *);
+static void tlan_phy_finish_auto_neg(struct net_device *);
#ifdef MONITOR
-static void TLan_PhyMonitor( struct net_device * );
+static void tlan_phy_monitor(struct net_device *);
#endif
/*
-static int TLan_PhyNop( struct net_device * );
-static int TLan_PhyInternalCheck( struct net_device * );
-static int TLan_PhyInternalService( struct net_device * );
-static int TLan_PhyDp83840aCheck( struct net_device * );
+ static int tlan_phy_nop(struct net_device *);
+ static int tlan_phy_internal_check(struct net_device *);
+ static int tlan_phy_internal_service(struct net_device *);
+ static int tlan_phy_dp83840a_check(struct net_device *);
*/
-static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
-static void TLan_MiiSendData( u16, u32, unsigned );
-static void TLan_MiiSync( u16 );
-static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
+static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_send_data(u16, u32, unsigned);
+static void tlan_mii_sync(u16);
+static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
-static void TLan_EeSendStart( u16 );
-static int TLan_EeSendByte( u16, u8, int );
-static void TLan_EeReceiveByte( u16, u8 *, int );
-static int TLan_EeReadByte( struct net_device *, u8, u8 * );
+static void tlan_ee_send_start(u16);
+static int tlan_ee_send_byte(u16, u8, int);
+static void tlan_ee_receive_byte(u16, u8 *, int);
+static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
static inline void
-TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
{
unsigned long addr = (unsigned long)skb;
tag->buffer[9].address = addr;
@@ -358,7 +364,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
}
static inline struct sk_buff *
-TLan_GetSKB( const struct tlan_list_tag *tag)
+tlan_get_skb(const struct tlan_list *tag)
{
unsigned long addr;
@@ -367,50 +373,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag)
return (struct sk_buff *) addr;
}
-
-static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
NULL,
- TLan_HandleTxEOF,
- TLan_HandleStatOverflow,
- TLan_HandleRxEOF,
- TLan_HandleDummy,
- TLan_HandleTxEOC,
- TLan_HandleStatusCheck,
- TLan_HandleRxEOC
+ tlan_handle_tx_eof,
+ tlan_handle_stat_overflow,
+ tlan_handle_rx_eof,
+ tlan_handle_dummy,
+ tlan_handle_tx_eoc,
+ tlan_handle_status_check,
+ tlan_handle_rx_eoc
};
static inline void
-TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- if ( priv->timer.function != NULL &&
- priv->timerType != TLAN_TIMER_ACTIVITY ) {
+ if (priv->timer.function != NULL &&
+ priv->timer_type != TLAN_TIMER_ACTIVITY) {
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
- priv->timer.function = TLan_Timer;
+ priv->timer.function = tlan_timer;
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
priv->timer.data = (unsigned long) dev;
- priv->timerSetAt = jiffies;
- priv->timerType = type;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = type;
mod_timer(&priv->timer, jiffies + ticks);
-} /* TLan_SetTimer */
+}
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Primary Functions
+ThunderLAN driver primary functions
- These functions are more or less common to all Linux network drivers.
+These functions are more or less common to all Linux network drivers.
******************************************************************************
*****************************************************************************/
@@ -419,49 +425,117 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
- /***************************************************************
- * tlan_remove_one
- *
- * Returns:
- * Nothing
- * Parms:
- * None
- *
- * Goes through the TLanDevices list and frees the device
- * structs and memory associated with each device (lists
- * and buffers). It also ureserves the IO port regions
- * associated with this device.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_remove_one
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * None
+ *
+ * Goes through the TLanDevices list and frees the device
+ * structs and memory associated with each device (lists
+ * and buffers). It also unreserves the IO port regions
+ * associated with this device.
+ *
+ **************************************************************/
-static void __devexit tlan_remove_one( struct pci_dev *pdev)
+static void __devexit tlan_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata( pdev );
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tlan_priv *priv = netdev_priv(dev);
- unregister_netdev( dev );
+ unregister_netdev(dev);
- if ( priv->dmaStorage ) {
- pci_free_consistent(priv->pciDev,
- priv->dmaSize, priv->dmaStorage,
- priv->dmaStorageDMA );
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev,
+ priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
}
#ifdef CONFIG_PCI
pci_release_regions(pdev);
#endif
- free_netdev( dev );
+ free_netdev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void tlan_start(struct net_device *dev)
+{
+ tlan_reset_lists(dev);
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+ /* Reset and power down phy */
+ tlan_reset_adapter(dev);
+ if (priv->timer.function != NULL) {
+ del_timer_sync(&priv->timer);
+ priv->timer.function = NULL;
+ }
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (netif_running(dev))
+ tlan_stop(dev);
+
+ netif_device_detach(dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
- pci_set_drvdata( pdev, NULL );
+ return 0;
}
+static int tlan_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, 0, 0);
+ netif_device_attach(dev);
+
+ if (netif_running(dev))
+ tlan_start(dev);
+
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend NULL
+#define tlan_resume NULL
+
+#endif /* CONFIG_PM */
+
+
static struct pci_driver tlan_driver = {
.name = "tlan",
.id_table = tlan_pci_tbl,
.probe = tlan_init_one,
.remove = __devexit_p(tlan_remove_one),
+ .suspend = tlan_suspend,
+ .resume = tlan_resume,
};
static int __init tlan_probe(void)
@@ -482,13 +556,13 @@ static int __init tlan_probe(void)
}
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
- TLan_EisaProbe();
+ tlan_eisa_probe();
printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n",
- TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s",
- tlan_have_pci, tlan_have_eisa);
+ tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+ tlan_have_pci, tlan_have_eisa);
- if (TLanDevicesInstalled == 0) {
+ if (tlan_devices_installed == 0) {
rc = -ENODEV;
goto err_out_pci_unreg;
}
@@ -501,39 +575,39 @@ err_out_pci_free:
}
-static int __devinit tlan_init_one( struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __devinit tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- return TLan_probe1( pdev, -1, -1, 0, ent);
+ return tlan_probe1(pdev, -1, -1, 0, ent);
}
/*
- ***************************************************************
- * tlan_probe1
- *
- * Returns:
- * 0 on success, error code on error
- * Parms:
- * none
- *
- * The name is lower case to fit in with all the rest of
- * the netcard_probe names. This function looks for
- * another TLan based adapter, setting it up with the
- * allocated device struct if one is found.
- * tlan_probe has been ported to the new net API and
- * now allocates its own device structure. This function
- * is also used by modules.
- *
- **************************************************************/
-
-static int __devinit TLan_probe1(struct pci_dev *pdev,
+***************************************************************
+* tlan_probe1
+*
+* Returns:
+* 0 on success, error code on error
+* Parms:
+* none
+*
+* The name is lower case to fit in with all the rest of
+* the netcard_probe names. This function looks for
+* another TLan based adapter, setting it up with the
+* allocated device struct if one is found.
+* tlan_probe has been ported to the new net API and
+* now allocates its own device structure. This function
+* is also used by modules.
+*
+**************************************************************/
+
+static int __devinit tlan_probe1(struct pci_dev *pdev,
long ioaddr, int irq, int rev,
- const struct pci_device_id *ent )
+ const struct pci_device_id *ent)
{
struct net_device *dev;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
u16 device_id;
int reg, rc = -ENODEV;
@@ -543,7 +617,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
if (rc)
return rc;
- rc = pci_request_regions(pdev, TLanSignature);
+ rc = pci_request_regions(pdev, tlan_signature);
if (rc) {
printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
goto err_out;
@@ -551,7 +625,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
}
#endif /* CONFIG_PCI */
- dev = alloc_etherdev(sizeof(TLanPrivateInfo));
+ dev = alloc_etherdev(sizeof(struct tlan_priv));
if (dev == NULL) {
printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
rc = -ENOMEM;
@@ -561,26 +635,28 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv = netdev_priv(dev);
- priv->pciDev = pdev;
+ priv->pci_dev = pdev;
priv->dev = dev;
/* Is this a PCI device? */
if (pdev) {
- u32 pci_io_base = 0;
+ u32 pci_io_base = 0;
priv->adapter = &board_info[ent->driver_data];
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
+ printk(KERN_ERR
+ "TLAN: No suitable PCI mapping available.\n");
goto err_out_free_dev;
}
- for ( reg= 0; reg <= 5; reg ++ ) {
+ for (reg = 0; reg <= 5; reg++) {
if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
pci_io_base = pci_resource_start(pdev, reg);
- TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n",
- pci_io_base);
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "IO mapping is available at %x.\n",
+ pci_io_base);
break;
}
}
@@ -592,7 +668,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
dev->base_addr = pci_io_base;
dev->irq = pdev->irq;
- priv->adapterRev = pdev->revision;
+ priv->adapter_rev = pdev->revision;
pci_set_master(pdev);
pci_set_drvdata(pdev, dev);
@@ -602,11 +678,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
device_id = inw(ioaddr + EISA_ID2);
priv->is_eisa = 1;
if (device_id == 0x20F1) {
- priv->adapter = &board_info[13]; /* NetFlex-3/E */
- priv->adapterRev = 23; /* TLAN 2.3 */
+ priv->adapter = &board_info[13]; /* NetFlex-3/E */
+ priv->adapter_rev = 23; /* TLAN 2.3 */
} else {
priv->adapter = &board_info[14];
- priv->adapterRev = 10; /* TLAN 1.0 */
+ priv->adapter_rev = 10; /* TLAN 1.0 */
}
dev->base_addr = ioaddr;
dev->irq = irq;
@@ -620,11 +696,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
: (dev->mem_start & 0x18) >> 3;
- if (priv->speed == 0x1) {
+ if (priv->speed == 0x1)
priv->speed = TLAN_SPEED_10;
- } else if (priv->speed == 0x2) {
+ else if (priv->speed == 0x2)
priv->speed = TLAN_SPEED_100;
- }
+
debug = priv->debug = dev->mem_end;
} else {
priv->aui = aui[boards_found];
@@ -635,11 +711,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
/* This will be used when we get an adapter error from
* within our irq handler */
- INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
+ INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
spin_lock_init(&priv->lock);
- rc = TLan_Init(dev);
+ rc = tlan_init(dev);
if (rc) {
printk(KERN_ERR "TLAN: Could not set up device.\n");
goto err_out_free_dev;
@@ -652,29 +728,29 @@ static int __devinit TLan_probe1(struct pci_dev *pdev,
}
- TLanDevicesInstalled++;
+ tlan_devices_installed++;
boards_found++;
/* pdev is NULL if this is an EISA device */
if (pdev)
tlan_have_pci++;
else {
- priv->nextDevice = TLan_Eisa_Devices;
- TLan_Eisa_Devices = dev;
+ priv->next_device = tlan_eisa_devices;
+ tlan_eisa_devices = dev;
tlan_have_eisa++;
}
printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
- dev->name,
- (int) dev->irq,
- (int) dev->base_addr,
- priv->adapter->deviceLabel,
- priv->adapterRev);
+ dev->name,
+ (int) dev->irq,
+ (int) dev->base_addr,
+ priv->adapter->device_label,
+ priv->adapter_rev);
return 0;
err_out_uninit:
- pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
- priv->dmaStorageDMA );
+ pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+ priv->dma_storage_dma);
err_out_free_dev:
free_netdev(dev);
err_out_regions:
@@ -689,22 +765,23 @@ err_out:
}
-static void TLan_Eisa_Cleanup(void)
+static void tlan_eisa_cleanup(void)
{
struct net_device *dev;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
- while( tlan_have_eisa ) {
- dev = TLan_Eisa_Devices;
+ while (tlan_have_eisa) {
+ dev = tlan_eisa_devices;
priv = netdev_priv(dev);
- if (priv->dmaStorage) {
- pci_free_consistent(priv->pciDev, priv->dmaSize,
- priv->dmaStorage, priv->dmaStorageDMA );
+ if (priv->dma_storage) {
+ pci_free_consistent(priv->pci_dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
}
- release_region( dev->base_addr, 0x10);
- unregister_netdev( dev );
- TLan_Eisa_Devices = priv->nextDevice;
- free_netdev( dev );
+ release_region(dev->base_addr, 0x10);
+ unregister_netdev(dev);
+ tlan_eisa_devices = priv->next_device;
+ free_netdev(dev);
tlan_have_eisa--;
}
}
@@ -715,7 +792,7 @@ static void __exit tlan_exit(void)
pci_unregister_driver(&tlan_driver);
if (tlan_have_eisa)
- TLan_Eisa_Cleanup();
+ tlan_eisa_cleanup();
}
@@ -726,24 +803,24 @@ module_exit(tlan_exit);
- /**************************************************************
- * TLan_EisaProbe
- *
- * Returns: 0 on success, 1 otherwise
- *
- * Parms: None
- *
- *
- * This functions probes for EISA devices and calls
- * TLan_probe1 when one is found.
- *
- *************************************************************/
+/**************************************************************
+ * tlan_eisa_probe
+ *
+ *	Returns: Nothing
+ *
+ * Parms: None
+ *
+ *
+ *	This function probes for EISA devices and calls
+ *	tlan_probe1 when one is found.
+ *
+ *************************************************************/
-static void __init TLan_EisaProbe (void)
+static void __init tlan_eisa_probe(void)
{
- long ioaddr;
- int rc = -ENODEV;
- int irq;
+ long ioaddr;
+ int rc = -ENODEV;
+ int irq;
u16 device_id;
if (!EISA_bus) {
@@ -754,15 +831,16 @@ static void __init TLan_EisaProbe (void)
/* Loop through all slots of the EISA bus */
for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
- (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
- TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
- (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
- TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
- (int) ioaddr);
- if (request_region(ioaddr, 0x10, TLanSignature) == NULL)
+ TLAN_DBG(TLAN_DEBUG_PROBE,
+ "Probing for EISA adapter at IO: 0x%4x : ",
+ (int) ioaddr);
+ if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
goto out;
if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +850,326 @@ static void __init TLan_EisaProbe (void)
device_id = inw(ioaddr + EISA_ID2);
if (device_id != 0x20F1 && device_id != 0x40F1) {
- release_region (ioaddr, 0x10);
+ release_region(ioaddr, 0x10);
goto out;
}
- if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */
- release_region (ioaddr, 0x10);
+ /* check if adapter is enabled */
+ if (inb(ioaddr + EISA_CR) != 0x1) {
+ release_region(ioaddr, 0x10);
goto out2;
}
if (debug == 0x10)
- printk("Found one\n");
+ printk(KERN_INFO "Found one\n");
/* Get irq from board */
- switch (inb(ioaddr + 0xCC0)) {
- case(0x10):
- irq=5;
- break;
- case(0x20):
- irq=9;
- break;
- case(0x40):
- irq=10;
- break;
- case(0x80):
- irq=11;
- break;
- default:
- goto out;
+ switch (inb(ioaddr + 0xcc0)) {
+ case(0x10):
+ irq = 5;
+ break;
+ case(0x20):
+ irq = 9;
+ break;
+ case(0x40):
+ irq = 10;
+ break;
+ case(0x80):
+ irq = 11;
+ break;
+ default:
+ goto out;
}
/* Setup the newly found eisa adapter */
- rc = TLan_probe1( NULL, ioaddr, irq,
- 12, NULL);
+ rc = tlan_probe1(NULL, ioaddr, irq,
+ 12, NULL);
continue;
- out:
- if (debug == 0x10)
- printk("None found\n");
- continue;
+out:
+ if (debug == 0x10)
+ printk(KERN_INFO "None found\n");
+ continue;
- out2: if (debug == 0x10)
- printk("Card found but it is not enabled, skipping\n");
- continue;
+out2:
+ if (debug == 0x10)
+ printk(KERN_INFO "Card found but it is not enabled, skipping\n");
+ continue;
}
-} /* TLan_EisaProbe */
+}
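
The IRQ selection byte read back at ioaddr + 0xcc0 is one-hot, which is why the switch above accepts exactly four values. The same decode can be kept data-driven; a minimal stand-alone sketch (the helper name is hypothetical, the 0x10/0x20/0x40/0x80 to 5/9/10/11 mapping is taken from the switch above):

static const struct {
	unsigned char sel;	/* value read at ioaddr + 0xcc0 */
	int irq;
} tlan_eisa_irq_map[] = {
	{ 0x10,  5 },
	{ 0x20,  9 },
	{ 0x40, 10 },
	{ 0x80, 11 },
};

static int tlan_eisa_irq_decode(unsigned char sel)
{
	int i;

	for (i = 0; i < 4; i++)
		if (tlan_eisa_irq_map[i].sel == sel)
			return tlan_eisa_irq_map[i].irq;
	return -1;	/* no usable IRQ; caller skips this slot */
}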
#ifdef CONFIG_NET_POLL_CONTROLLER
-static void TLan_Poll(struct net_device *dev)
+static void tlan_poll(struct net_device *dev)
{
disable_irq(dev->irq);
- TLan_HandleInterrupt(dev->irq, dev);
+ tlan_handle_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
-static const struct net_device_ops TLan_netdev_ops = {
- .ndo_open = TLan_Open,
- .ndo_stop = TLan_Close,
- .ndo_start_xmit = TLan_StartTx,
- .ndo_tx_timeout = TLan_tx_timeout,
- .ndo_get_stats = TLan_GetStats,
- .ndo_set_multicast_list = TLan_SetMulticastList,
- .ndo_do_ioctl = TLan_ioctl,
+static const struct net_device_ops tlan_netdev_ops = {
+ .ndo_open = tlan_open,
+ .ndo_stop = tlan_close,
+ .ndo_start_xmit = tlan_start_tx,
+ .ndo_tx_timeout = tlan_tx_timeout,
+ .ndo_get_stats = tlan_get_stats,
+ .ndo_set_multicast_list = tlan_set_multicast_list,
+ .ndo_do_ioctl = tlan_ioctl,
.ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = TLan_Poll,
+ .ndo_poll_controller = tlan_poll,
#endif
};
- /***************************************************************
- * TLan_Init
- *
- * Returns:
- * 0 on success, error code otherwise.
- * Parms:
- * dev The structure of the device to be
- * init'ed.
- *
- * This function completes the initialization of the
- * device structure and driver. It reserves the IO
- * addresses, allocates memory for the lists and bounce
- * buffers, retrieves the MAC address from the eeprom
- * and assignes the device's methods.
- *
- **************************************************************/
-
-static int TLan_Init( struct net_device *dev )
+/***************************************************************
+ * tlan_init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+ *	and assigns the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
{
int dma_size;
- int err;
+ int err;
int i;
- TLanPrivateInfo *priv;
+ struct tlan_priv *priv;
priv = netdev_priv(dev);
- dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
- * ( sizeof(TLanList) );
- priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
- dma_size, &priv->dmaStorageDMA);
- priv->dmaSize = dma_size;
-
- if ( priv->dmaStorage == NULL ) {
- printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n",
- dev->name );
+ dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+ * (sizeof(struct tlan_list));
+ priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+ dma_size,
+ &priv->dma_storage_dma);
+ priv->dma_size = dma_size;
+
+ if (priv->dma_storage == NULL) {
+ printk(KERN_ERR
+ "TLAN: Could not allocate lists and buffers for %s.\n",
+ dev->name);
return -ENOMEM;
}
- memset( priv->dmaStorage, 0, dma_size );
- priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
- priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
- priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
- priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
+ memset(priv->dma_storage, 0, dma_size);
+ priv->rx_list = (struct tlan_list *)
+ ALIGN((unsigned long)priv->dma_storage, 8);
+ priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+ priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+ priv->tx_list_dma =
+ priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
err = 0;
- for ( i = 0; i < 6 ; i++ )
- err |= TLan_EeReadByte( dev,
- (u8) priv->adapter->addrOfs + i,
- (u8 *) &dev->dev_addr[i] );
- if ( err ) {
+ for (i = 0; i < 6 ; i++)
+ err |= tlan_ee_read_byte(dev,
+ (u8) priv->adapter->addr_ofs + i,
+ (u8 *) &dev->dev_addr[i]);
+ if (err) {
printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
- dev->name,
- err );
+ dev->name,
+ err);
}
dev->addr_len = 6;
netif_carrier_off(dev);
/* Device methods */
- dev->netdev_ops = &TLan_netdev_ops;
+ dev->netdev_ops = &tlan_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
return 0;
-} /* TLan_Init */
+}
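
The carve-up of the single pci_alloc_consistent() block above is easier to see in isolation: the rx list array starts at the first 8-byte boundary and the tx array follows immediately after the rx lists. A user-space sketch of the same arithmetic (list counts and the list size are illustrative placeholders, not the values from tlan.h):

#include <stdint.h>
#include <stdio.h>

#define NUM_RX	32	/* placeholder for TLAN_NUM_RX_LISTS */
#define NUM_TX	64	/* placeholder for TLAN_NUM_TX_LISTS */
#define LIST_SZ	64	/* placeholder for sizeof(struct tlan_list) */

int main(void)
{
	uintptr_t base = 0x1004;			/* sample allocation address */
	uintptr_t rx   = (base + 7) & ~(uintptr_t)7;	/* ALIGN(base, 8) */
	uintptr_t tx   = rx + (uintptr_t)LIST_SZ * NUM_RX;

	printf("dma block %d bytes, rx lists @ %#lx, tx lists @ %#lx\n",
	       (NUM_RX + NUM_TX) * LIST_SZ,
	       (unsigned long)rx, (unsigned long)tx);
	return 0;
}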
- /***************************************************************
- * TLan_Open
- *
- * Returns:
- * 0 on success, error code otherwise.
- * Parms:
- * dev Structure of device to be opened.
- *
- * This routine puts the driver and TLAN adapter in a
- * state where it is ready to send and receive packets.
- * It allocates the IRQ, resets and brings the adapter
- * out of reset, and allows interrupts. It also delays
- * the startup for autonegotiation or sends a Rx GO
- * command to the adapter, as appropriate.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
-static int TLan_Open( struct net_device *dev )
+static int tlan_open(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int err;
- priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
- err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
- dev->name, dev );
+ priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+ err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+ dev->name, dev);
- if ( err ) {
+ if (err) {
pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n",
- dev->name, dev->irq );
+ dev->name, dev->irq);
return err;
}
init_timer(&priv->timer);
- netif_start_queue(dev);
- /* NOTE: It might not be necessary to read the stats before a
- reset if you don't care what the values are.
- */
- TLan_ResetLists( dev );
- TLan_ReadAndClearStats( dev, TLAN_IGNORE );
- TLan_ResetAdapter( dev );
+ tlan_start(dev);
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
- dev->name, priv->tlanRev );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
+ dev->name, priv->tlan_rev);
return 0;
-} /* TLan_Open */
+}
- /**************************************************************
- * TLan_ioctl
- *
- * Returns:
- * 0 on success, error code otherwise
- * Params:
- * dev structure of device to receive ioctl.
- *
- * rq ifreq structure to hold userspace data.
- *
- * cmd ioctl command.
- *
- *
- *************************************************************/
+/**************************************************************
+ * tlan_ioctl
+ *
+ * Returns:
+ * 0 on success, error code otherwise
+ * Params:
+ * dev structure of device to receive ioctl.
+ *
+ * rq ifreq structure to hold userspace data.
+ *
+ * cmd ioctl command.
+ *
+ *
+ *************************************************************/
-static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
- u32 phy = priv->phy[priv->phyNum];
+ u32 phy = priv->phy[priv->phy_num];
- if (!priv->phyOnline)
+ if (!priv->phy_online)
return -EAGAIN;
- switch(cmd) {
- case SIOCGMIIPHY: /* Get address of MII PHY in use. */
- data->phy_id = phy;
+ switch (cmd) {
+ case SIOCGMIIPHY: /* get address of MII PHY in use. */
+ data->phy_id = phy;
- case SIOCGMIIREG: /* Read MII PHY register. */
- TLan_MiiReadReg(dev, data->phy_id & 0x1f,
- data->reg_num & 0x1f, &data->val_out);
- return 0;
+ case SIOCGMIIREG: /* read MII PHY register. */
+ tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &data->val_out);
+ return 0;
- case SIOCSMIIREG: /* Write MII PHY register. */
- TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
- data->reg_num & 0x1f, data->val_in);
- return 0;
- default:
- return -EOPNOTSUPP;
+ case SIOCSMIIREG: /* write MII PHY register. */
+ tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
}
-} /* tlan_ioctl */
+}
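
From user space these ioctls chain naturally: SIOCGMIIPHY discovers the PHY address (and, in this driver, falls through to a register read), after which SIOCGMIIREG reads any MII register. A minimal sketch, assuming the classic mii-tool idiom of aliasing struct mii_ioctl_data onto the ifreq union and a hypothetical interface name of eth0; error handling omitted:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {
		mii->reg_num = MII_BMSR;	/* basic mode status register */
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("phy %d: BMSR = 0x%04x\n",
			       mii->phy_id, mii->val_out);
	}
	close(fd);
	return 0;
}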
- /***************************************************************
- * TLan_tx_timeout
- *
- * Returns: nothing
- *
- * Params:
- * dev structure of device which timed out
- * during transmit.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_tx_timeout
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * dev structure of device which timed out
+ * during transmit.
+ *
+ **************************************************************/
-static void TLan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev)
{
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
/* Ok so we timed out, lets see what we can do about it...*/
- TLan_FreeLists( dev );
- TLan_ResetLists( dev );
- TLan_ReadAndClearStats( dev, TLAN_IGNORE );
- TLan_ResetAdapter( dev );
+ tlan_free_lists(dev);
+ tlan_reset_lists(dev);
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue( dev );
+ netif_wake_queue(dev);
}
- /***************************************************************
- * TLan_tx_timeout_work
- *
- * Returns: nothing
- *
- * Params:
- * work work item of device which timed out
- *
- **************************************************************/
+/***************************************************************
+ * tlan_tx_timeout_work
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * work work item of device which timed out
+ *
+ **************************************************************/
-static void TLan_tx_timeout_work(struct work_struct *work)
+static void tlan_tx_timeout_work(struct work_struct *work)
{
- TLanPrivateInfo *priv =
- container_of(work, TLanPrivateInfo, tlan_tqueue);
+ struct tlan_priv *priv =
+ container_of(work, struct tlan_priv, tlan_tqueue);
- TLan_tx_timeout(priv->dev);
+ tlan_tx_timeout(priv->dev);
}
- /***************************************************************
- * TLan_StartTx
- *
- * Returns:
- * 0 on success, non-zero on failure.
- * Parms:
- * skb A pointer to the sk_buff containing the
- * frame to be sent.
- * dev The device to send the data on.
- *
- * This function adds a frame to the Tx list to be sent
- * ASAP. First it verifies that the adapter is ready and
- * there is room in the queue. Then it sets up the next
- * available list, copies the frame to the corresponding
- * buffer. If the adapter Tx channel is idle, it gives
- * the adapter a Tx Go command on the list, otherwise it
- * sets the forward address of the previous list to point
- * to this one. Then it frees the sk_buff.
- *
- **************************************************************/
-
-static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
+/***************************************************************
+ * tlan_start_tx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ * This function adds a frame to the Tx list to be sent
+ * ASAP. First it verifies that the adapter is ready and
+ * there is room in the queue. Then it sets up the next
+ * available list, copies the frame to the corresponding
+ * buffer. If the adapter Tx channel is idle, it gives
+ * the adapter a Tx Go command on the list, otherwise it
+ * sets the forward address of the previous list to point
+ * to this one. Then it frees the sk_buff.
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t tail_list_phys;
- TLanList *tail_list;
+ struct tlan_list *tail_list;
unsigned long flags;
unsigned int txlen;
- if ( ! priv->phyOnline ) {
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
- dev->name );
+ if (!priv->phy_online) {
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
+ dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1100,218 +1178,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
return NETDEV_TX_OK;
txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
- tail_list = priv->txList + priv->txTail;
- tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
+ tail_list = priv->tx_list + priv->tx_tail;
+ tail_list_phys =
+ priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
- if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
- dev->name, priv->txHead, priv->txTail );
+ if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
+ dev->name, priv->tx_head, priv->tx_tail);
netif_stop_queue(dev);
- priv->txBusyCount++;
+ priv->tx_busy_count++;
return NETDEV_TX_BUSY;
}
tail_list->forward = 0;
- tail_list->buffer[0].address = pci_map_single(priv->pciDev,
+ tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data, txlen,
PCI_DMA_TODEVICE);
- TLan_StoreSKB(tail_list, skb);
+ tlan_store_skb(tail_list, skb);
- tail_list->frameSize = (u16) txlen;
+ tail_list->frame_size = (u16) txlen;
tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
tail_list->buffer[1].count = 0;
tail_list->buffer[1].address = 0;
spin_lock_irqsave(&priv->lock, flags);
- tail_list->cStat = TLAN_CSTAT_READY;
- if ( ! priv->txInProgress ) {
- priv->txInProgress = 1;
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Starting TX on buffer %d\n", priv->txTail );
- outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
- outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
+ tail_list->c_stat = TLAN_CSTAT_READY;
+ if (!priv->tx_in_progress) {
+ priv->tx_in_progress = 1;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Starting TX on buffer %d\n",
+ priv->tx_tail);
+ outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
} else {
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n",
- priv->txTail );
- if ( priv->txTail == 0 ) {
- ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Adding buffer %d to TX channel\n",
+ priv->tx_tail);
+ if (priv->tx_tail == 0) {
+ (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
= tail_list_phys;
} else {
- ( priv->txList + ( priv->txTail - 1 ) )->forward
+ (priv->tx_list + (priv->tx_tail - 1))->forward
= tail_list_phys;
}
}
spin_unlock_irqrestore(&priv->lock, flags);
- CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+ CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
return NETDEV_TX_OK;
-} /* TLan_StartTx */
+}
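
The transmit path is a classic single-producer ring: tx_tail is only advanced, with wrap-around, after a list has been handed to the chip, and tlan_handle_tx_eof() advances tx_head to chase it. A self-contained sketch of that bookkeeping; circ_inc() is a local stand-in assumed to behave like the driver's CIRC_INC macro:

#include <stdio.h>

#define NUM_TX	8	/* illustrative ring size only */

static void circ_inc(int *idx, int size)
{
	if (++(*idx) >= size)
		*idx = 0;	/* wrap back to the first list */
}

int main(void)
{
	int tail = NUM_TX - 2;
	int i;

	for (i = 0; i < 4; i++) {
		printf("queue frame on tx list %d\n", tail);
		circ_inc(&tail, NUM_TX);	/* wraps 7 -> 0 */
	}
	return 0;
}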
- /***************************************************************
- * TLan_HandleInterrupt
- *
- * Returns:
- * Nothing
- * Parms:
- * irq The line on which the interrupt
- * occurred.
- * dev_id A pointer to the device assigned to
- * this irq line.
- *
- * This function handles an interrupt generated by its
- * assigned TLAN adapter. The function deactivates
- * interrupts on its adapter, records the type of
- * interrupt, executes the appropriate subhandler, and
- * acknowdges the interrupt to the adapter (thus
- * re-enabling adapter interrupts.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_interrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+ *
+ * This function handles an interrupt generated by its
+ * assigned TLAN adapter. The function deactivates
+ * interrupts on its adapter, records the type of
+ * interrupt, executes the appropriate subhandler, and
+ * acknowledges the interrupt to the adapter (thus
+ * re-enabling adapter interrupts).
+ *
+ **************************************************************/
-static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 host_int;
u16 type;
spin_lock(&priv->lock);
- host_int = inw( dev->base_addr + TLAN_HOST_INT );
- type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
- if ( type ) {
+ host_int = inw(dev->base_addr + TLAN_HOST_INT);
+ type = (host_int & TLAN_HI_IT_MASK) >> 2;
+ if (type) {
u32 ack;
u32 host_cmd;
- outw( host_int, dev->base_addr + TLAN_HOST_INT );
- ack = TLanIntVector[type]( dev, host_int );
+ outw(host_int, dev->base_addr + TLAN_HOST_INT);
+ ack = tlan_int_vector[type](dev, host_int);
- if ( ack ) {
- host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
- outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+ if (ack) {
+ host_cmd = TLAN_HC_ACK | ack | (type << 18);
+ outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
}
}
spin_unlock(&priv->lock);
return IRQ_RETVAL(type);
-} /* TLan_HandleInterrupts */
+}
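
The dispatch above relies on the interrupt type being a direct index into tlan_int_vector[], and on each handler's return value being folded into the acknowledge written to HOST_CMD (a zero return suppresses the ack). A compressed sketch of the table shape, with hypothetical handler names standing in for the tlan_handle_* family:

typedef unsigned short u16;
typedef unsigned int u32;

struct net_device;	/* opaque for this sketch */

typedef u32 (*tlan_int_handler)(struct net_device *dev, u16 host_int);

static u32 handle_invalid(struct net_device *dev, u16 host_int)
{
	(void)dev; (void)host_int;
	return 0;		/* nothing to acknowledge */
}

static u32 handle_tx_eof(struct net_device *dev, u16 host_int)
{
	(void)dev; (void)host_int;
	return 1;		/* ack count, possibly OR-ed with TLAN_HC_GO */
}

static const tlan_int_handler int_vector[] = {
	handle_invalid,		/* type 0 */
	handle_tx_eof,		/* type 1: Tx EOF */
	/* ... one slot per remaining TLAN interrupt type ... */
};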
- /***************************************************************
- * TLan_Close
- *
- * Returns:
- * An error code.
- * Parms:
- * dev The device structure of the device to
- * close.
- *
- * This function shuts down the adapter. It records any
- * stats, puts the adapter into reset state, deactivates
- * its time as needed, and frees the irq it is using.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_close
+ *
+ * Returns:
+ * An error code.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+ * its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
-static int TLan_Close(struct net_device *dev)
+static int tlan_close(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
- netif_stop_queue(dev);
priv->neg_be_verbose = 0;
+ tlan_stop(dev);
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
- outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
- if ( priv->timer.function != NULL ) {
- del_timer_sync( &priv->timer );
- priv->timer.function = NULL;
- }
-
- free_irq( dev->irq, dev );
- TLan_FreeLists( dev );
- TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
+ free_irq(dev->irq, dev);
+ tlan_free_lists(dev);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
return 0;
-} /* TLan_Close */
+}
- /***************************************************************
- * TLan_GetStats
- *
- * Returns:
- * A pointer to the device's statistics structure.
- * Parms:
- * dev The device structure to return the
- * stats for.
- *
- * This function updates the devices statistics by reading
- * the TLAN chip's onboard registers. Then it returns the
- * address of the statistics structure.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_get_stats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+ * This function updates the device's statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
-static struct net_device_stats *TLan_GetStats( struct net_device *dev )
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
/* Should only read stats if open ? */
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
- TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
- priv->rxEocCount );
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
- priv->txBusyCount );
- if ( debug & TLAN_DEBUG_GNRL ) {
- TLan_PrintDio( dev->base_addr );
- TLan_PhyPrint( dev );
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
+ priv->rx_eoc_count);
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
+ priv->tx_busy_count);
+ if (debug & TLAN_DEBUG_GNRL) {
+ tlan_print_dio(dev->base_addr);
+ tlan_phy_print(dev);
}
- if ( debug & TLAN_DEBUG_LIST ) {
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
- TLan_PrintList( priv->rxList + i, "RX", i );
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
- TLan_PrintList( priv->txList + i, "TX", i );
+ if (debug & TLAN_DEBUG_LIST) {
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+ tlan_print_list(priv->rx_list + i, "RX", i);
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+ tlan_print_list(priv->tx_list + i, "TX", i);
}
return &dev->stats;
-} /* TLan_GetStats */
+}
- /***************************************************************
- * TLan_SetMulticastList
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure to set the
- * multicast list for.
- *
- * This function sets the TLAN adaptor to various receive
- * modes. If the IFF_PROMISC flag is set, promiscuous
- * mode is acitviated. Otherwise, promiscuous mode is
- * turned off. If the IFF_ALLMULTI flag is set, then
- * the hash table is set to receive all group addresses.
- * Otherwise, the first three multicast addresses are
- * stored in AREG_1-3, and the rest are selected via the
- * hash table, as necessary.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_set_multicast_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+ * This function sets the TLAN adaptor to various receive
+ * modes. If the IFF_PROMISC flag is set, promiscuous
+ * mode is activated. Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
-static void TLan_SetMulticastList( struct net_device *dev )
+static void tlan_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
u32 hash1 = 0;
@@ -1320,53 +1394,56 @@ static void TLan_SetMulticastList( struct net_device *dev )
u32 offset;
u8 tmp;
- if ( dev->flags & IFF_PROMISC ) {
- tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
- TLan_DioWrite8( dev->base_addr,
- TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+ if (dev->flags & IFF_PROMISC) {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
} else {
- tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
- TLan_DioWrite8( dev->base_addr,
- TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
- if ( dev->flags & IFF_ALLMULTI ) {
- for ( i = 0; i < 3; i++ )
- TLan_SetMac( dev, i + 1, NULL );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+ 0xffffffff);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+ 0xffffffff);
} else {
i = 0;
netdev_for_each_mc_addr(ha, dev) {
- if ( i < 3 ) {
- TLan_SetMac( dev, i + 1,
+ if (i < 3) {
+ tlan_set_mac(dev, i + 1,
(char *) &ha->addr);
} else {
- offset = TLan_HashFunc((u8 *)&ha->addr);
- if ( offset < 32 )
- hash1 |= ( 1 << offset );
+ offset =
+ tlan_hash_func((u8 *)&ha->addr);
+ if (offset < 32)
+ hash1 |= (1 << offset);
else
- hash2 |= ( 1 << ( offset - 32 ) );
+ hash2 |= (1 << (offset - 32));
}
i++;
}
- for ( ; i < 3; i++ )
- TLan_SetMac( dev, i + 1, NULL );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
- TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+ for ( ; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
}
}
-} /* TLan_SetMulticastList */
+}
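
Past the first three perfect-filter slots (AREG_1-3), membership is tracked by a 64-bit hash split across the two 32-bit HASH registers: offsets 0-31 set a bit in hash1, offsets 32-63 in hash2. A stand-alone sketch of that bit placement; hash6() is only an assumed stand-in, the real tlan_hash_func() reduction lives elsewhere in this file:

#include <stdio.h>

typedef unsigned int u32;
typedef unsigned char u8;

/* assumed XOR fold of the MAC down to a 6-bit offset; illustrative only */
static u8 hash6(const u8 *a)
{
	u8 h = 0;
	int i;

	for (i = 0; i < 6; i++)
		h ^= a[i];
	return h & 0x3f;
}

int main(void)
{
	const u8 mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	u32 hash1 = 0, hash2 = 0;
	u8 offset = hash6(mac);

	if (offset < 32)		/* offsets 0-31 land in HASH_1 */
		hash1 |= 1u << offset;
	else				/* offsets 32-63 land in HASH_2 */
		hash2 |= 1u << (offset - 32);
	printf("offset %u hash1 %08x hash2 %08x\n", offset, hash1, hash2);
	return 0;
}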
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Interrupt Vectors and Table
+ThunderLAN driver interrupt vectors and table
- Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
- Programmer's Guide" for more informations on handling interrupts
- generated by TLAN based adapters.
+please see chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more informations on handling interrupts
+generated by TLAN based adapters.
******************************************************************************
*****************************************************************************/
@@ -1374,46 +1451,48 @@ static void TLan_SetMulticastList( struct net_device *dev )
- /***************************************************************
- * TLan_HandleTxEOF
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles Tx EOF interrupts which are raised
- * by the adapter when it has completed sending the
- * contents of a buffer. If detemines which list/buffer
- * was completed and resets it. If the buffer was the last
- * in the channel (EOC), then the function checks to see if
- * another buffer is ready to send, and if so, sends a Tx
- * Go command. Finally, the driver activates/continues the
- * activity LED.
- *
- **************************************************************/
-
-static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+/***************************************************************
+ * tlan_handle_tx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+ * contents of a buffer. It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int eoc = 0;
- TLanList *head_list;
+ struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 0;
- u16 tmpCStat;
+ u16 tmp_c_stat;
- TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
- while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
- struct sk_buff *skb = TLan_GetSKB(head_list);
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ struct sk_buff *skb = tlan_get_skb(head_list);
ack++;
- pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
+ pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
max(skb->len,
(unsigned int)TLAN_MIN_FRAME_SIZE),
PCI_DMA_TODEVICE);
@@ -1421,304 +1500,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
- if ( tmpCStat & TLAN_CSTAT_EOC )
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
- dev->stats.tx_bytes += head_list->frameSize;
+ dev->stats.tx_bytes += head_list->frame_size;
- head_list->cStat = TLAN_CSTAT_UNUSED;
+ head_list->c_stat = TLAN_CSTAT_UNUSED;
netif_start_queue(dev);
- CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
- head_list = priv->txList + priv->txHead;
+ CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+ head_list = priv->tx_list + priv->tx_head;
}
if (!ack)
- printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
-
- if ( eoc ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
- head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
- if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
- outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ printk(KERN_INFO
+ "TLAN: Received interrupt for uncompleted TX frame.\n");
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
- priv->txInProgress = 0;
+ priv->tx_in_progress = 0;
}
}
- if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
- if ( priv->timer.function == NULL ) {
- priv->timer.function = TLan_Timer;
- priv->timer.data = (unsigned long) dev;
- priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
- priv->timerSetAt = jiffies;
- priv->timerType = TLAN_TIMER_ACTIVITY;
- add_timer(&priv->timer);
- } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
- priv->timerSetAt = jiffies;
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.data = (unsigned long) dev;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
}
}
return ack;
-} /* TLan_HandleTxEOF */
+}
- /***************************************************************
- * TLan_HandleStatOverflow
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Statistics Overflow interrupt
- * which means that one or more of the TLAN statistics
- * registers has reached 1/2 capacity and needs to be read.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_stat_overflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
-static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
{
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
return 1;
-} /* TLan_HandleStatOverflow */
-
-
-
-
- /***************************************************************
- * TLan_HandleRxEOF
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Rx EOF interrupt which
- * indicates a frame has been received by the adapter from
- * the net and the frame has been transferred to memory.
- * The function determines the bounce buffer the frame has
- * been loaded into, creates a new sk_buff big enough to
- * hold the frame, and sends it to protocol stack. It
- * then resets the used buffer and appends it to the end
- * of the list. If the frame was the last in the Rx
- * channel (EOC), the function restarts the receive channel
- * by sending an Rx Go command to the adapter. Then it
- * activates/continues the activity LED.
- *
- **************************************************************/
-
-static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_rx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function determines the bounce buffer the frame has
+ * been loaded into, creates a new sk_buff big enough to
+ * hold the frame, and sends it to the protocol stack. It
+ * then resets the used buffer and appends it to the end
+ * of the list. If the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 ack = 0;
int eoc = 0;
- TLanList *head_list;
+ struct tlan_list *head_list;
struct sk_buff *skb;
- TLanList *tail_list;
- u16 tmpCStat;
+ struct tlan_list *tail_list;
+ u16 tmp_c_stat;
dma_addr_t head_list_phys;
- TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n",
- priv->rxHead, priv->rxTail );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys =
+ priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
- while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
- dma_addr_t frameDma = head_list->buffer[0].address;
- u32 frameSize = head_list->frameSize;
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ dma_addr_t frame_dma = head_list->buffer[0].address;
+ u32 frame_size = head_list->frame_size;
struct sk_buff *new_skb;
ack++;
- if (tmpCStat & TLAN_CSTAT_EOC)
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
eoc = 1;
new_skb = netdev_alloc_skb_ip_align(dev,
TLAN_MAX_FRAME_SIZE + 5);
- if ( !new_skb )
+ if (!new_skb)
goto drop_and_reuse;
- skb = TLan_GetSKB(head_list);
- pci_unmap_single(priv->pciDev, frameDma,
+ skb = tlan_get_skb(head_list);
+ pci_unmap_single(priv->pci_dev, frame_dma,
TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
- skb_put( skb, frameSize );
+ skb_put(skb, frame_size);
- dev->stats.rx_bytes += frameSize;
+ dev->stats.rx_bytes += frame_size;
- skb->protocol = eth_type_trans( skb, dev );
- netif_rx( skb );
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
- head_list->buffer[0].address = pci_map_single(priv->pciDev,
- new_skb->data,
- TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ head_list->buffer[0].address =
+ pci_map_single(priv->pci_dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
- TLan_StoreSKB(head_list, new_skb);
+ tlan_store_skb(head_list, new_skb);
drop_and_reuse:
head_list->forward = 0;
- head_list->cStat = 0;
- tail_list = priv->rxList + priv->rxTail;
+ head_list->c_stat = 0;
+ tail_list = priv->rx_list + priv->rx_tail;
tail_list->forward = head_list_phys;
- CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
- CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+ CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+ CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
}
if (!ack)
- printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
-
-
- if ( eoc ) {
- TLAN_DBG( TLAN_DEBUG_RX,
- "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n",
- priv->rxHead, priv->rxTail );
- head_list = priv->rxList + priv->rxHead;
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
- outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ printk(KERN_INFO
+ "TLAN: Received interrupt for uncompleted RX frame.\n");
+
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
- priv->rxEocCount++;
+ priv->rx_eoc_count++;
}
- if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
- if ( priv->timer.function == NULL ) {
- priv->timer.function = TLan_Timer;
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
priv->timer.data = (unsigned long) dev;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
- priv->timerSetAt = jiffies;
- priv->timerType = TLAN_TIMER_ACTIVITY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
add_timer(&priv->timer);
- } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
- priv->timerSetAt = jiffies;
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
}
}
return ack;
-} /* TLan_HandleRxEOF */
+}
- /***************************************************************
- * TLan_HandleDummy
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles the Dummy interrupt, which is
- * raised whenever a test interrupt is generated by setting
- * the Req_Int bit of HOST_CMD to 1.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_dummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
-static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
{
- printk( "TLAN: Test interrupt on %s.\n", dev->name );
+ pr_info("TLAN: Test interrupt on %s.\n", dev->name);
return 1;
-} /* TLan_HandleDummy */
+}
- /***************************************************************
- * TLan_HandleTxEOC
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This driver is structured to determine EOC occurrences by
- * reading the CSTAT member of the list structure. Tx EOC
- * interrupts are disabled via the DIO INTDIS register.
- * However, TLAN chips before revision 3.0 didn't have this
- * functionality, so process EOC events if this is the
- * case.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_tx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so process EOC events if this is the
+ * case.
+ *
+ **************************************************************/
-static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
- TLanList *head_list;
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct tlan_list *head_list;
dma_addr_t head_list_phys;
u32 ack = 1;
host_int = 0;
- if ( priv->tlanRev < 0x30 ) {
- TLAN_DBG( TLAN_DEBUG_TX,
- "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
- priv->txHead, priv->txTail );
- head_list = priv->txList + priv->txHead;
- head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
- if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
netif_stop_queue(dev);
- outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO;
} else {
- priv->txInProgress = 0;
+ priv->tx_in_progress = 0;
}
}
return ack;
-} /* TLan_HandleTxEOC */
+}
- /***************************************************************
- * TLan_HandleStatusCheck
- *
- * Returns:
- * 0 if Adapter check, 1 if Network Status check.
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This function handles Adapter Check/Network Status
- * interrupts generated by the adapter. It checks the
- * vector in the HOST_INT register to determine if it is
- * an Adapter Check interrupt. If so, it resets the
- * adapter. Otherwise it clears the status registers
- * and services the PHY.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_status_check
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
-static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 ack;
u32 error;
u8 net_sts;
@@ -1727,92 +1815,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
u16 tlphy_sts;
ack = 1;
- if ( host_int & TLAN_HI_IV_MASK ) {
- netif_stop_queue( dev );
- error = inl( dev->base_addr + TLAN_CH_PARM );
- printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error );
- TLan_ReadAndClearStats( dev, TLAN_RECORD );
- outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+ if (host_int & TLAN_HI_IV_MASK) {
+ netif_stop_queue(dev);
+ error = inl(dev->base_addr + TLAN_CH_PARM);
+ pr_info("TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error);
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
schedule_work(&priv->tlan_tqueue);
netif_wake_queue(dev);
ack = 0;
} else {
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
- phy = priv->phy[priv->phyNum];
-
- net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
- if ( net_sts ) {
- TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
- dev->name, (unsigned) net_sts );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+ phy = priv->phy[priv->phy_num];
+
+ net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+ if (net_sts) {
+ tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
+ dev->name, (unsigned) net_sts);
}
- if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) {
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
- if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
- ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
- tlphy_ctl |= TLAN_TC_SWAPOL;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
- } else if ( ( tlphy_sts & TLAN_TS_POLOK ) &&
- ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
- tlphy_ctl &= ~TLAN_TC_SWAPOL;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
- }
-
- if (debug) {
- TLan_PhyPrint( dev );
+ if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ if (!(tlphy_sts & TLAN_TS_POLOK) &&
+ !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ } else if ((tlphy_sts & TLAN_TS_POLOK) &&
+ (tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
}
+
+ if (debug)
+ tlan_phy_print(dev);
}
}
return ack;
-} /* TLan_HandleStatusCheck */
+}
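
The polarity handling above is a two-way latch: enable the swap when the PHY reports bad polarity with the swap off, and disable it again once polarity is OK with the swap still on. The same rule as a pure function (the bit values here are placeholders; the real TLAN_TS_POLOK and TLAN_TC_SWAPOL come from tlan.h):

typedef unsigned short u16;

#define TS_POLOK	0x0001	/* placeholder bit value */
#define TC_SWAPOL	0x0002	/* placeholder bit value */

static u16 fix_polarity(u16 tlphy_sts, u16 tlphy_ctl)
{
	if (!(tlphy_sts & TS_POLOK) && !(tlphy_ctl & TC_SWAPOL))
		tlphy_ctl |= TC_SWAPOL;		/* bad polarity: start swapping */
	else if ((tlphy_sts & TS_POLOK) && (tlphy_ctl & TC_SWAPOL))
		tlphy_ctl &= ~TC_SWAPOL;	/* polarity fine: stop swapping */
	return tlphy_ctl;
}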
- /***************************************************************
- * TLan_HandleRxEOC
- *
- * Returns:
- * 1
- * Parms:
- * dev Device assigned the IRQ that was
- * raised.
- * host_int The contents of the HOST_INT
- * port.
- *
- * This driver is structured to determine EOC occurrences by
- * reading the CSTAT member of the list structure. Rx EOC
- * interrupts are disabled via the DIO INTDIS register.
- * However, TLAN chips before revision 3.0 didn't have this
- * CSTAT member or a INTDIS register, so if this chip is
- * pre-3.0, process EOC interrupts normally.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_handle_rx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * CSTAT member or a INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
-static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
dma_addr_t head_list_phys;
u32 ack = 1;
- if ( priv->tlanRev < 0x30 ) {
- TLAN_DBG( TLAN_DEBUG_RX,
- "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
- priv->rxHead, priv->rxTail );
- head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
- outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+ priv->rx_head, priv->rx_tail);
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
ack |= TLAN_HC_GO | TLAN_HC_RT;
- priv->rxEocCount++;
+ priv->rx_eoc_count++;
}
return ack;
-} /* TLan_HandleRxEOC */
+}
@@ -1820,98 +1910,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Timer Function
+ThunderLAN driver timer function
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_Timer
- *
- * Returns:
- * Nothing
- * Parms:
- * data A value given to add timer when
- * add_timer was called.
- *
- * This function handles timed functionality for the
- * TLAN driver. The two current timer uses are for
- * delaying for autonegotionation and driving the ACT LED.
- * - Autonegotiation requires being allowed about
- * 2 1/2 seconds before attempting to transmit a
- * packet. It would be a very bad thing to hang
- * the kernel this long, so the driver doesn't
- * allow transmission 'til after this time, for
- * certain PHYs. It would be much nicer if all
- * PHYs were interrupt-capable like the internal
- * PHY.
- * - The ACT LED, which shows adapter activity, is
- * driven by the driver, and so must be left on
- * for a short period to power up the LED so it
- * can be seen. This delay can be changed by
- * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
- * if desired. 100 ms produces a slightly
- * sluggish response.
- *
- **************************************************************/
-
-static void TLan_Timer( unsigned long data )
+/***************************************************************
+ * tlan_timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ *	data	A value given to the timer when
+ *		add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+ *	delaying for autonegotiation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 100 ms produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u32 elapsed;
unsigned long flags = 0;
priv->timer.function = NULL;
- switch ( priv->timerType ) {
+ switch (priv->timer_type) {
#ifdef MONITOR
- case TLAN_TIMER_LINK_BEAT:
- TLan_PhyMonitor( dev );
- break;
+ case TLAN_TIMER_LINK_BEAT:
+ tlan_phy_monitor(dev);
+ break;
#endif
- case TLAN_TIMER_PHY_PDOWN:
- TLan_PhyPowerDown( dev );
- break;
- case TLAN_TIMER_PHY_PUP:
- TLan_PhyPowerUp( dev );
- break;
- case TLAN_TIMER_PHY_RESET:
- TLan_PhyReset( dev );
- break;
- case TLAN_TIMER_PHY_START_LINK:
- TLan_PhyStartLink( dev );
- break;
- case TLAN_TIMER_PHY_FINISH_AN:
- TLan_PhyFinishAutoNeg( dev );
- break;
- case TLAN_TIMER_FINISH_RESET:
- TLan_FinishReset( dev );
- break;
- case TLAN_TIMER_ACTIVITY:
- spin_lock_irqsave(&priv->lock, flags);
- if ( priv->timer.function == NULL ) {
- elapsed = jiffies - priv->timerSetAt;
- if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
- TLan_DioWrite8( dev->base_addr,
- TLAN_LED_REG, TLAN_LED_LINK );
- } else {
- priv->timer.function = TLan_Timer;
- priv->timer.expires = priv->timerSetAt
- + TLAN_TIMER_ACT_DELAY;
- spin_unlock_irqrestore(&priv->lock, flags);
- add_timer( &priv->timer );
- break;
- }
+ case TLAN_TIMER_PHY_PDOWN:
+ tlan_phy_power_down(dev);
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ tlan_phy_power_up(dev);
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ tlan_phy_reset(dev);
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ tlan_phy_start_link(dev);
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ tlan_phy_finish_auto_neg(dev);
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ tlan_finish_reset(dev);
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function == NULL) {
+ elapsed = jiffies - priv->timer_set_at;
+ if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK);
+ } else {
+ priv->timer.function = tlan_timer;
+ priv->timer.expires = priv->timer_set_at
+ + TLAN_TIMER_ACT_DELAY;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ add_timer(&priv->timer);
+ break;
}
- spin_unlock_irqrestore(&priv->lock, flags);
- break;
- default:
- break;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ default:
+ break;
}
-} /* TLan_Timer */
+}
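A note on how the one-shot timer gets armed: tlan_set_timer() is defined outside this hunk, but its call sites here (device, delay in jiffies, timer type) and the fields touched above suggest roughly the following shape. This is a hedged sketch, not the driver's verbatim helper:

        /* Sketch only: body inferred from the call sites and the fields
         * used in tlan_timer() above, not copied from the driver. */
        static void tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
        {
                struct tlan_priv *priv = netdev_priv(dev);
                unsigned long flags;

                spin_lock_irqsave(&priv->lock, flags);
                if (priv->timer.function == NULL) {     /* one pending use at a time */
                        priv->timer.function = tlan_timer;
                        priv->timer.data = (unsigned long) dev;
                        priv->timer.expires = jiffies + ticks;
                        priv->timer_set_at = jiffies;
                        priv->timer_type = type;
                        add_timer(&priv->timer);
                }
                spin_unlock_irqrestore(&priv->lock, flags);
        }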
@@ -1919,39 +2009,39 @@ static void TLan_Timer( unsigned long data )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Adapter Related Routines
+ThunderLAN driver adapter related routines
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_ResetLists
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure with the list
- * stuctures to be reset.
- *
- * This routine sets the variables associated with managing
- * the TLAN lists to their initial values.
- *
- **************************************************************/
-
-static void TLan_ResetLists( struct net_device *dev )
+/***************************************************************
+ * tlan_reset_lists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+ * structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
- TLanList *list;
+ struct tlan_list *list;
dma_addr_t list_phys;
struct sk_buff *skb;
- priv->txHead = 0;
- priv->txTail = 0;
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
- list = priv->txList + i;
- list->cStat = TLAN_CSTAT_UNUSED;
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ list->c_stat = TLAN_CSTAT_UNUSED;
list->buffer[0].address = 0;
list->buffer[2].count = 0;
list->buffer[2].address = 0;
@@ -1959,169 +2049,169 @@ static void TLan_ResetLists( struct net_device *dev )
list->buffer[9].address = 0;
}
- priv->rxHead = 0;
- priv->rxTail = TLAN_NUM_RX_LISTS - 1;
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
- list = priv->rxList + i;
- list_phys = priv->rxListDMA + sizeof(TLanList) * i;
- list->cStat = TLAN_CSTAT_READY;
- list->frameSize = TLAN_MAX_FRAME_SIZE;
+ priv->rx_head = 0;
+ priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+ list->c_stat = TLAN_CSTAT_READY;
+ list->frame_size = TLAN_MAX_FRAME_SIZE;
list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
- if ( !skb ) {
- pr_err("TLAN: out of memory for received data.\n" );
+ if (!skb) {
+ pr_err("TLAN: out of memory for received data.\n");
break;
}
- list->buffer[0].address = pci_map_single(priv->pciDev,
+ list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data,
TLAN_MAX_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
- TLan_StoreSKB(list, skb);
+ tlan_store_skb(list, skb);
list->buffer[1].count = 0;
list->buffer[1].address = 0;
- list->forward = list_phys + sizeof(TLanList);
+ list->forward = list_phys + sizeof(struct tlan_list);
}
/* in case ran out of memory early, clear bits */
while (i < TLAN_NUM_RX_LISTS) {
- TLan_StoreSKB(priv->rxList + i, NULL);
+ tlan_store_skb(priv->rx_list + i, NULL);
++i;
}
list->forward = 0;
-} /* TLan_ResetLists */
+}
-static void TLan_FreeLists( struct net_device *dev )
+static void tlan_free_lists(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
- TLanList *list;
+ struct tlan_list *list;
struct sk_buff *skb;
- for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
- list = priv->txList + i;
- skb = TLan_GetSKB(list);
- if ( skb ) {
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
pci_unmap_single(
- priv->pciDev,
+ priv->pci_dev,
list->buffer[0].address,
max(skb->len,
(unsigned int)TLAN_MIN_FRAME_SIZE),
PCI_DMA_TODEVICE);
- dev_kfree_skb_any( skb );
+ dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
- for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
- list = priv->rxList + i;
- skb = TLan_GetSKB(list);
- if ( skb ) {
- pci_unmap_single(priv->pciDev,
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ pci_unmap_single(priv->pci_dev,
list->buffer[0].address,
TLAN_MAX_FRAME_SIZE,
PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any( skb );
+ dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
}
}
-} /* TLan_FreeLists */
+}
- /***************************************************************
- * TLan_PrintDio
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base Base IO port of the device of
- * which to print DIO registers.
- *
- * This function prints out all the internal (DIO)
- * registers of a TLAN chip.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_print_dio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
-static void TLan_PrintDio( u16 io_base )
+static void tlan_print_dio(u16 io_base)
{
u32 data0, data1;
int i;
- printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n",
- io_base );
- printk( "TLAN: Off. +0 +4\n" );
- for ( i = 0; i < 0x4C; i+= 8 ) {
- data0 = TLan_DioRead32( io_base, i );
- data1 = TLan_DioRead32( io_base, i + 0x4 );
- printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 );
+ pr_info("TLAN: Contents of internal registers for io base 0x%04hx.\n",
+ io_base);
+ pr_info("TLAN: Off. +0 +4\n");
+ for (i = 0; i < 0x4C; i += 8) {
+ data0 = tlan_dio_read32(io_base, i);
+ data1 = tlan_dio_read32(io_base, i + 0x4);
+ pr_info("TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1);
}
-} /* TLan_PrintDio */
+}
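tlan_dio_read32() and its siblings are not part of this hunk; judging from the open-coded DIO accesses in tlan_read_and_clear_stats() below (select a window through TLAN_DIO_ADR, then touch TLAN_DIO_DATA), a plausible shape is the following, offered as an assumption rather than the driver's actual helper:

        /* Assumed shape of the DIO read accessor, inferred from the
         * outw/inb pattern used elsewhere in this file. */
        static u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
        {
                outw(internal_addr, base_addr + TLAN_DIO_ADR);  /* select window */
                return inl(base_addr + TLAN_DIO_DATA);          /* read 4 bytes  */
        }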
- /***************************************************************
- * TLan_PrintList
- *
- * Returns:
- * Nothing
- * Parms:
- * list A pointer to the TLanList structure to
- * be printed.
- * type A string to designate type of list,
- * "Rx" or "Tx".
- * num The index of the list.
- *
- * This function prints out the contents of the list
- * pointed to by the list parameter.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_print_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the struct tlan_list to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
-static void TLan_PrintList( TLanList *list, char *type, int num)
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
{
int i;
- printk( "TLAN: %s List %d at %p\n", type, num, list );
- printk( "TLAN: Forward = 0x%08x\n", list->forward );
- printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat );
- printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize );
- /* for ( i = 0; i < 10; i++ ) { */
- for ( i = 0; i < 2; i++ ) {
- printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
- i, list->buffer[i].count, list->buffer[i].address );
+ pr_info("TLAN: %s List %d at %p\n", type, num, list);
+ pr_info("TLAN: Forward = 0x%08x\n", list->forward);
+ pr_info("TLAN: CSTAT = 0x%04hx\n", list->c_stat);
+ pr_info("TLAN: Frame Size = 0x%04hx\n", list->frame_size);
+ /* for (i = 0; i < 10; i++) { */
+ for (i = 0; i < 2; i++) {
+ pr_info("TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+ i, list->buffer[i].count, list->buffer[i].address);
}
-} /* TLan_PrintList */
+}
- /***************************************************************
- * TLan_ReadAndClearStats
- *
- * Returns:
- * Nothing
- * Parms:
- * dev Pointer to device structure of adapter
- * to which to read stats.
- * record Flag indicating whether to add
- *
- * This functions reads all the internal status registers
- * of the TLAN chip, which clears them as a side effect.
- * It then either adds the values to the device's status
- * struct, or discards them, depending on whether record
- * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
- *
- **************************************************************/
+/***************************************************************
+ * tlan_read_and_clear_stats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * from which to read stats.
+ * record Flag indicating whether to record
+ * the values read or to discard them.
+ *
+ * This function reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
-static void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
{
u32 tx_good, tx_under;
u32 rx_good, rx_over;
@@ -2129,41 +2219,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
u32 multi_col, single_col;
u32 excess_col, late_col, loss;
- outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
- tx_good = inb( dev->base_addr + TLAN_DIO_DATA );
- tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
- tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
- rx_good = inb( dev->base_addr + TLAN_DIO_DATA );
- rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
- rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
- def_tx = inb( dev->base_addr + TLAN_DIO_DATA );
- def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
- code = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
-
- outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
- multi_col = inb( dev->base_addr + TLAN_DIO_DATA );
- multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
- single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
- single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
-
- outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
- excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
- late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
- loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-
- if ( record ) {
+ outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+ def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
+ def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+ outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+ loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+ if (record) {
dev->stats.rx_packets += rx_good;
dev->stats.rx_errors += rx_over + crc + code;
dev->stats.tx_packets += tx_good;
dev->stats.tx_errors += tx_under + loss;
- dev->stats.collisions += multi_col + single_col + excess_col + late_col;
+ dev->stats.collisions += multi_col
+ + single_col + excess_col + late_col;
dev->stats.rx_over_errors += rx_over;
dev->stats.rx_crc_errors += crc;
@@ -2173,39 +2264,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record )
dev->stats.tx_carrier_errors += loss;
}
-} /* TLan_ReadAndClearStats */
+}
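Most of the outw/inb groups above read one statistics window: a 24-bit counter in bytes 0-2 plus an 8-bit counter in byte 3, cleared as a side effect of the read (the deferred-TX window splits its bytes differently). The repeated pattern could be factored into a helper like this, shown purely as a hypothetical illustration of the byte layout:

        /* Hypothetical helper, not in the driver: read one statistics
         * window as a 24-bit counter plus an 8-bit counter. */
        static void tlan_read_stat_window(u16 io_base, u16 reg,
                                          u32 *count24, u8 *count8)
        {
                outw(reg, io_base + TLAN_DIO_ADR);      /* select the window */
                *count24  = inb(io_base + TLAN_DIO_DATA);
                *count24 += inb(io_base + TLAN_DIO_DATA + 1) << 8;
                *count24 += inb(io_base + TLAN_DIO_DATA + 2) << 16;
                *count8   = inb(io_base + TLAN_DIO_DATA + 3);
        }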
- /***************************************************************
- * TLan_Reset
- *
- * Returns:
- * 0
- * Parms:
- * dev Pointer to device structure of adapter
- * to be reset.
- *
- * This function resets the adapter and it's physical
- * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
- * Programmer's Guide" for details. The routine tries to
- * implement what is detailed there, though adjustments
- * have been made.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_reset_adapter
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+ * This function resets the adapter and its physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
static void
-TLan_ResetAdapter( struct net_device *dev )
+tlan_reset_adapter(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
int i;
u32 addr;
u32 data;
u8 data8;
- priv->tlanFullDuplex = false;
- priv->phyOnline=0;
+ priv->tlan_full_duplex = false;
+ priv->phy_online = 0;
netif_carrier_off(dev);
/* 1. Assert reset bit. */
@@ -2216,7 +2307,7 @@ TLan_ResetAdapter( struct net_device *dev )
udelay(1000);
-/* 2. Turn off interrupts. ( Probably isn't necessary ) */
+/* 2. Turn off interrupts. (Probably isn't necessary) */
data = inl(dev->base_addr + TLAN_HOST_CMD);
data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2315,208 @@ TLan_ResetAdapter( struct net_device *dev )
/* 3. Clear AREGs and HASHs. */
- for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
- TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
- }
+ for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+ tlan_dio_write32(dev->base_addr, (u16) i, 0);
/* 4. Setup NetConfig register. */
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
- outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD );
- outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD );
+ outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+ outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
- outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+ tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
/* 7. Setup the remaining registers. */
- if ( priv->tlanRev >= 0x30 ) {
+ if (priv->tlan_rev >= 0x30) {
data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
- TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+ tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
}
- TLan_PhyDetect( dev );
+ tlan_phy_detect(dev);
data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
- if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+ if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
data |= TLAN_NET_CFG_BIT;
- if ( priv->aui == 1 ) {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
- } else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
- priv->tlanFullDuplex = true;
+ if (priv->aui == 1) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+ } else if (priv->duplex == TLAN_DUPLEX_FULL) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+ priv->tlan_full_duplex = true;
} else {
- TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
}
}
- if ( priv->phyNum == 0 ) {
+ if (priv->phy_num == 0)
data |= TLAN_NET_CFG_PHY_EN;
- }
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- TLan_FinishReset( dev );
- } else {
- TLan_PhyPowerDown( dev );
- }
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+ tlan_finish_reset(dev);
+ else
+ tlan_phy_power_down(dev);
-} /* TLan_ResetAdapter */
+}
static void
-TLan_FinishReset( struct net_device *dev )
+tlan_finish_reset(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u8 data;
u32 phy;
u8 sio;
u16 status;
u16 partner;
u16 tlphy_ctl;
- u16 tlphy_par;
+ u16 tlphy_par;
u16 tlphy_id1, tlphy_id2;
- int i;
+ int i;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
- if ( priv->tlanFullDuplex ) {
+ if (priv->tlan_full_duplex)
data |= TLAN_NET_CMD_DUPLEX;
- }
- TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+ tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
- if ( priv->phyNum == 0 ) {
+ if (priv->phy_num == 0)
data |= TLAN_NET_MASK_MASK7;
- }
- TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
- TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
+ tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+ tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
- if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
- ( priv->aui ) ) {
+ if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+ (priv->aui)) {
status = MII_GS_LINK;
- printk( "TLAN: %s: Link forced.\n", dev->name );
+ pr_info("TLAN: %s: Link forced.\n", dev->name);
} else {
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- udelay( 1000 );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- if ( (status & MII_GS_LINK) &&
- /* We only support link info on Nat.Sem. PHY's */
- (tlphy_id1 == NAT_SEM_ID1) &&
- (tlphy_id2 == NAT_SEM_ID2) ) {
- TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par );
-
- printk( "TLAN: %s: Link active with ", dev->name );
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ if ((status & MII_GS_LINK) &&
+ /* We only support link info on Nat.Sem. PHYs */
+ (tlphy_id1 == NAT_SEM_ID1) &&
+ (tlphy_id2 == NAT_SEM_ID2)) {
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
+
+ pr_info("TLAN: %s: Link active with ", dev->name);
if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
- printk( "forced 10%sMbps %s-Duplex\n",
- tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
- tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
+ pr_info("forced 10%sMbps %s-Duplex\n",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? "" : "0",
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "Half");
} else {
- printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
- tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
- tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
- printk("TLAN: Partner capability: ");
- for (i = 5; i <= 10; i++)
- if (partner & (1<<i))
- printk("%s",media[i-5]);
+ pr_info("Autonegotiation enabled, at 10%sMbps %s-Duplex\n",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? "" : "0",
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "half");
+ pr_info("TLAN: Partner capability: ");
+ for (i = 5; i <= 10; i++)
+ if (partner & (1<<i))
+ printk("%s", media[i-5]);
printk("\n");
}
- TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
#ifdef MONITOR
/* We have link beat..for now anyway */
- priv->link = 1;
- /*Enabling link beat monitoring */
- TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT );
+ priv->link = 1;
+ /*Enabling link beat monitoring */
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
#endif
} else if (status & MII_GS_LINK) {
- printk( "TLAN: %s: Link active\n", dev->name );
- TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+ pr_info("TLAN: %s: Link active\n", dev->name);
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+ TLAN_LED_LINK);
}
}
- if ( priv->phyNum == 0 ) {
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
- tlphy_ctl |= TLAN_TC_INTEN;
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
- sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
- sio |= TLAN_NET_SIO_MINTEN;
- TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
- }
-
- if ( status & MII_GS_LINK ) {
- TLan_SetMac( dev, 0, dev->dev_addr );
- priv->phyOnline = 1;
- outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
- if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) {
- outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
- }
- outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM );
- outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+ if (priv->phy_num == 0) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ tlphy_ctl |= TLAN_TC_INTEN;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+ sio |= TLAN_NET_SIO_MINTEN;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+ }
+
+ if (status & MII_GS_LINK) {
+ tlan_set_mac(dev, 0, dev->dev_addr);
+ priv->phy_online = 1;
+ outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+ if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+ outb((TLAN_HC_REQ_INT >> 8),
+ dev->base_addr + TLAN_HOST_CMD + 1);
+ outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
netif_carrier_on(dev);
} else {
- printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
- dev->name );
- TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
+ pr_info("TLAN: %s: Link inactive, will retry in 10 secs...\n",
+ dev->name);
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
return;
}
- TLan_SetMulticastList(dev);
+ tlan_set_multicast_list(dev);
-} /* TLan_FinishReset */
+}
- /***************************************************************
- * TLan_SetMac
- *
- * Returns:
- * Nothing
- * Parms:
- * dev Pointer to device structure of adapter
- * on which to change the AREG.
- * areg The AREG to set the address in (0 - 3).
- * mac A pointer to an array of chars. Each
- * element stores one byte of the address.
- * IE, it isn't in ascii.
- *
- * This function transfers a MAC address to one of the
- * TLAN AREGs (address registers). The TLAN chip locks
- * the register on writing to offset 0 and unlocks the
- * register after writing to offset 5. If NULL is passed
- * in mac, then the AREG is filled with 0's.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_set_mac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+ * i.e., it isn't in ASCII.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
-static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
{
int i;
areg *= 6;
- if ( mac != NULL ) {
- for ( i = 0; i < 6; i++ )
- TLan_DioWrite8( dev->base_addr,
- TLAN_AREG_0 + areg + i, mac[i] );
+ if (mac != NULL) {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, mac[i]);
} else {
- for ( i = 0; i < 6; i++ )
- TLan_DioWrite8( dev->base_addr,
- TLAN_AREG_0 + areg + i, 0 );
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, 0);
}
-} /* TLan_SetMac */
+}
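Usage, as seen elsewhere in this patch: once link is up the station address goes into AREG 0, and passing NULL wipes an AREG:

        tlan_set_mac(dev, 0, dev->dev_addr);    /* program the MAC address */
        tlan_set_mac(dev, 0, NULL);             /* fill AREG 0 with zeros  */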
@@ -2432,205 +2524,202 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver PHY Layer Routines
+ThunderLAN driver PHY layer routines
******************************************************************************
*****************************************************************************/
- /*********************************************************************
- * TLan_PhyPrint
- *
- * Returns:
- * Nothing
- * Parms:
- * dev A pointer to the device structure of the
- * TLAN device having the PHYs to be detailed.
- *
- * This function prints the registers a PHY (aka transceiver).
- *
- ********************************************************************/
+/*********************************************************************
+ * tlan_phy_print
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+ * This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
-static void TLan_PhyPrint( struct net_device *dev )
+static void tlan_phy_print(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 i, data0, data1, data2, data3, phy;
- phy = priv->phy[priv->phyNum];
-
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
- } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
- printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
- printk( "TLAN: Off. +0 +1 +2 +3\n" );
- for ( i = 0; i < 0x20; i+= 4 ) {
- printk( "TLAN: 0x%02x", i );
- TLan_MiiReadReg( dev, phy, i, &data0 );
- printk( " 0x%04hx", data0 );
- TLan_MiiReadReg( dev, phy, i + 1, &data1 );
- printk( " 0x%04hx", data1 );
- TLan_MiiReadReg( dev, phy, i + 2, &data2 );
- printk( " 0x%04hx", data2 );
- TLan_MiiReadReg( dev, phy, i + 3, &data3 );
- printk( " 0x%04hx\n", data3 );
+ phy = priv->phy[priv->phy_num];
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ pr_info("TLAN: Device %s, Unmanaged PHY.\n", dev->name);
+ } else if (phy <= TLAN_PHY_MAX_ADDR) {
+ pr_info("TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy);
+ pr_info("TLAN: Off. +0 +1 +2 +3\n");
+ for (i = 0; i < 0x20; i += 4) {
+ pr_info("TLAN: 0x%02x", i);
+ tlan_mii_read_reg(dev, phy, i, &data0);
+ printk(" 0x%04hx", data0);
+ tlan_mii_read_reg(dev, phy, i + 1, &data1);
+ printk(" 0x%04hx", data1);
+ tlan_mii_read_reg(dev, phy, i + 2, &data2);
+ printk(" 0x%04hx", data2);
+ tlan_mii_read_reg(dev, phy, i + 3, &data3);
+ printk(" 0x%04hx\n", data3);
}
} else {
- printk( "TLAN: Device %s, Invalid PHY.\n", dev->name );
+ pr_info("TLAN: Device %s, Invalid PHY.\n", dev->name);
}
-} /* TLan_PhyPrint */
+}
- /*********************************************************************
- * TLan_PhyDetect
- *
- * Returns:
- * Nothing
- * Parms:
- * dev A pointer to the device structure of the adapter
- * for which the PHY needs determined.
- *
- * So far I've found that adapters which have external PHYs
- * may also use the internal PHY for part of the functionality.
- * (eg, AUI/Thinnet). This function finds out if this TLAN
- * chip has an internal PHY, and then finds the first external
- * PHY (starting from address 0) if it exists).
- *
- ********************************************************************/
+/*********************************************************************
+ * tlan_phy_detect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+ * for which the PHY needs to be determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality.
+ * (e.g. AUI/Thinnet). This function finds out if this TLAN
+ * chip has an internal PHY, and then finds the first external
+ * PHY (starting from address 0), if one exists.
+ *
+ ********************************************************************/
-static void TLan_PhyDetect( struct net_device *dev )
+static void tlan_phy_detect(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 control;
u16 hi;
u16 lo;
u32 phy;
- if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
- priv->phyNum = 0xFFFF;
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ priv->phy_num = 0xffff;
return;
}
- TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+ tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
- if ( hi != 0xFFFF ) {
+ if (hi != 0xffff)
priv->phy[0] = TLAN_PHY_MAX_ADDR;
- } else {
+ else
priv->phy[0] = TLAN_PHY_NONE;
- }
priv->phy[1] = TLAN_PHY_NONE;
- for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
- TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
- if ( ( control != 0xFFFF ) ||
- ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
- TLAN_DBG( TLAN_DEBUG_GNRL,
- "PHY found at %02x %04x %04x %04x\n",
- phy, control, hi, lo );
- if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
- ( phy != TLAN_PHY_MAX_ADDR ) ) {
+ for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+ if ((control != 0xffff) ||
+ (hi != 0xffff) || (lo != 0xffff)) {
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "PHY found at %02x %04x %04x %04x\n",
+ phy, control, hi, lo);
+ if ((priv->phy[1] == TLAN_PHY_NONE) &&
+ (phy != TLAN_PHY_MAX_ADDR)) {
priv->phy[1] = phy;
}
}
}
- if ( priv->phy[1] != TLAN_PHY_NONE ) {
- priv->phyNum = 1;
- } else if ( priv->phy[0] != TLAN_PHY_NONE ) {
- priv->phyNum = 0;
- } else {
- printk( "TLAN: Cannot initialize device, no PHY was found!\n" );
- }
+ if (priv->phy[1] != TLAN_PHY_NONE)
+ priv->phy_num = 1;
+ else if (priv->phy[0] != TLAN_PHY_NONE)
+ priv->phy_num = 0;
+ else
+ pr_info("TLAN: Cannot initialize device, no PHY was found!\n");
-} /* TLan_PhyDetect */
+}
-static void TLan_PhyPowerDown( struct net_device *dev )
+static void tlan_phy_power_down(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 value;
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
- TLan_MiiSync( dev->base_addr );
- TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
- if ( ( priv->phyNum == 0 ) &&
- ( priv->phy[1] != TLAN_PHY_NONE ) &&
- ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
- TLan_MiiSync( dev->base_addr );
- TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ if ((priv->phy_num == 0) &&
+ (priv->phy[1] != TLAN_PHY_NONE) &&
+ (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
}
/* Wait for 50 ms and powerup
* This is arbitrary. It is intended to make sure the
* transceiver settles.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
-} /* TLan_PhyPowerDown */
+}
-static void TLan_PhyPowerUp( struct net_device *dev )
+static void tlan_phy_power_up(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 value;
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name );
- TLan_MiiSync( dev->base_addr );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK;
- TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
- TLan_MiiSync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ tlan_mii_sync(dev->base_addr);
/* Wait for 500 ms and reset the
* transceiver. The TLAN docs say both 50 ms and
* 500 ms, so do the longer, just in case.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
-} /* TLan_PhyPowerUp */
+}
-static void TLan_PhyReset( struct net_device *dev )
+static void tlan_phy_reset(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 phy;
u16 value;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name );
- TLan_MiiSync( dev->base_addr );
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
value = MII_GC_LOOPBK | MII_GC_RESET;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
- while ( value & MII_GC_RESET ) {
- TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
- }
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+ while (value & MII_GC_RESET)
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
/* Wait for 500 ms and initialize.
* I don't remember why I wait this long.
* I've changed this to 50ms, as it seems long enough.
*/
- TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK );
+ tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
-} /* TLan_PhyReset */
+}
-static void TLan_PhyStartLink( struct net_device *dev )
+static void tlan_phy_start_link(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 ability;
u16 control;
u16 data;
@@ -2638,86 +2727,88 @@ static void TLan_PhyStartLink( struct net_device *dev )
u16 status;
u16 tctl;
- phy = priv->phy[priv->phyNum];
- TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability );
+ phy = priv->phy[priv->phy_num];
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
- if ( ( status & MII_GS_AUTONEG ) &&
- ( ! priv->aui ) ) {
+ if ((status & MII_GS_AUTONEG) &&
+ (!priv->aui)) {
ability = status >> 11;
- if ( priv->speed == TLAN_SPEED_10 &&
- priv->duplex == TLAN_DUPLEX_HALF) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
- } else if ( priv->speed == TLAN_SPEED_10 &&
- priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = true;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
- } else if ( priv->speed == TLAN_SPEED_100 &&
- priv->duplex == TLAN_DUPLEX_HALF) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
- } else if ( priv->speed == TLAN_SPEED_100 &&
- priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = true;
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
+ if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+ } else if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
} else {
/* Set Auto-Neg advertisement */
- TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1);
+ tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+ (ability << 5) | 1);
/* Enable Auto-Neg */
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
/* Restart Auto-Neg */
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
/* Wait for 4 sec for autonegotiation
- * to complete. The max spec time is less than this
- * but the card need additional time to start AN.
- * .5 sec should be plenty extra.
- */
- printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ * to complete. The max spec time is less than this
+ * but the card needs additional time to start AN.
+ * .5 sec should be plenty extra.
+ */
+ pr_info("TLAN: %s: Starting autonegotiation.\n",
+ dev->name);
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
return;
}
}
- if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
- priv->phyNum = 0;
- data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ if ((priv->aui) && (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
return;
- } else if ( priv->phyNum == 0 ) {
+ } else if (priv->phy_num == 0) {
control = 0;
- TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
- if ( priv->aui ) {
- tctl |= TLAN_TC_AUISEL;
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+ if (priv->aui) {
+ tctl |= TLAN_TC_AUISEL;
} else {
- tctl &= ~TLAN_TC_AUISEL;
- if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+ tctl &= ~TLAN_TC_AUISEL;
+ if (priv->duplex == TLAN_DUPLEX_FULL) {
control |= MII_GC_DUPLEX;
- priv->tlanFullDuplex = true;
+ priv->tlan_full_duplex = true;
}
- if ( priv->speed == TLAN_SPEED_100 ) {
+ if (priv->speed == TLAN_SPEED_100)
control |= MII_GC_SPEEDSEL;
- }
}
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
- TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
}
/* Wait for 2 sec to give the transceiver time
* to establish link.
*/
- TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET );
+ tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
-} /* TLan_PhyStartLink */
+}
-static void TLan_PhyFinishAutoNeg( struct net_device *dev )
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 an_adv;
u16 an_lpa;
u16 data;
@@ -2725,115 +2816,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
u16 phy;
u16 status;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
- udelay( 1000 );
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
- if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+ if (!(status & MII_GS_AUTOCMPLT)) {
/* Wait for 8 sec to give the process
* more time. Perhaps we should fail after a while.
*/
- if (!priv->neg_be_verbose++) {
- pr_info("TLAN: Giving autonegotiation more time.\n");
- pr_info("TLAN: Please check that your adapter has\n");
- pr_info("TLAN: been properly connected to a HUB or Switch.\n");
- pr_info("TLAN: Trying to establish link in the background...\n");
- }
- TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
+ if (!priv->neg_be_verbose++) {
+ pr_info("TLAN: Giving autonegotiation more time.\n");
+ pr_info("TLAN: Please check that your adapter has\n");
+ pr_info("TLAN: been properly connected to a HUB or Switch.\n");
+ pr_info("TLAN: Trying to establish link in the background...\n");
+ }
+ tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
return;
}
- printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
- TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
- TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+ pr_info("TLAN: %s: Autonegotiation complete.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
mode = an_adv & an_lpa & 0x03E0;
- if ( mode & 0x0100 ) {
- priv->tlanFullDuplex = true;
- } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
- priv->tlanFullDuplex = true;
- }
-
- if ( ( ! ( mode & 0x0180 ) ) &&
- ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
- ( priv->phyNum != 0 ) ) {
- priv->phyNum = 0;
- data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
- TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
- TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+ if (mode & 0x0100)
+ priv->tlan_full_duplex = true;
+ else if (!(mode & 0x0080) && (mode & 0x0040))
+ priv->tlan_full_duplex = true;
+
+ if ((!(mode & 0x0180)) &&
+ (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+ (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
return;
}
- if ( priv->phyNum == 0 ) {
- if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
- ( an_adv & an_lpa & 0x0040 ) ) {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
- MII_GC_AUTOENB | MII_GC_DUPLEX );
- pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" );
+ if (priv->phy_num == 0) {
+ if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+ (an_adv & an_lpa & 0x0040)) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB | MII_GC_DUPLEX);
+ pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n");
} else {
- TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
- pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" );
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB);
+ pr_info("TLAN: Starting internal PHY with HALF-DUPLEX\n");
}
}
/* Wait for 100 ms. No reason in particular.
*/
- TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET );
+ tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
-} /* TLan_PhyFinishAutoNeg */
+}
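The mask arithmetic above is easier to read with the standard MII advertisement bit layout spelled out (bit 5 = 10HD, 6 = 10FD, 7 = 100HD, 8 = 100FD, 9 = 100BASE-T4; the bit names are an assumption taken from the MII spec, the masks are the driver's own):

        /* Restatement of the duplex decision in tlan_phy_finish_auto_neg();
         * illustration only, this function does not exist in the driver. */
        static bool tlan_negotiated_full_duplex(u16 an_adv, u16 an_lpa)
        {
                u16 mode = an_adv & an_lpa & 0x03e0;    /* modes both ends offer */

                if (mode & 0x0100)                      /* 100BASE-TX full duplex */
                        return true;
                if (!(mode & 0x0080) && (mode & 0x0040))
                        return true;                    /* no 100HD, but 10FD */
                return false;
        }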
#ifdef MONITOR
- /*********************************************************************
- *
- * TLan_phyMonitor
- *
- * Returns:
- * None
- *
- * Params:
- * dev The device structure of this device.
- *
- *
- * This function monitors PHY condition by reading the status
- * register via the MII bus. This can be used to give info
- * about link changes (up/down), and possible switch to alternate
- * media.
- *
- * ******************************************************************/
-
-void TLan_PhyMonitor( struct net_device *dev )
+/*********************************************************************
+ *
+ * tlan_phy_monitor
+ *
+ * Returns:
+ * None
+ *
+ * Params:
+ * dev The device structure of this device.
+ *
+ *
+ * This function monitors PHY condition by reading the status
+ * register via the MII bus. This can be used to give info
+ * about link changes (up/down), and possible switch to alternate
+ * media.
+ *
+ *******************************************************************/
+
+void tlan_phy_monitor(struct net_device *dev)
{
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
u16 phy;
u16 phy_status;
- phy = priv->phy[priv->phyNum];
+ phy = priv->phy[priv->phy_num];
- /* Get PHY status register */
- TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status );
+ /* Get PHY status register */
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
- /* Check if link has been lost */
- if (!(phy_status & MII_GS_LINK)) {
- if (priv->link) {
- priv->link = 0;
- printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
- netif_carrier_off(dev);
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
- return;
+ /* Check if link has been lost */
+ if (!(phy_status & MII_GS_LINK)) {
+ if (priv->link) {
+ priv->link = 0;
+ printk(KERN_DEBUG "TLAN: %s has lost link\n",
+ dev->name);
+ netif_carrier_off(dev);
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+ return;
}
}
- /* Link restablished? */
- if ((phy_status & MII_GS_LINK) && !priv->link) {
- priv->link = 1;
- printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
+ /* Link reestablished? */
+ if ((phy_status & MII_GS_LINK) && !priv->link) {
+ priv->link = 1;
+ printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+ dev->name);
netif_carrier_on(dev);
- }
+ }
/* Setup a new monitor */
- TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
}
#endif /* MONITOR */
@@ -2842,47 +2936,48 @@ void TLan_PhyMonitor( struct net_device *dev )
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver MII Routines
+ThunderLAN driver MII routines
- These routines are based on the information in Chap. 2 of the
- "ThunderLAN Programmer's Guide", pp. 15-24.
+These routines are based on the information in Chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_MiiReadReg
- *
- * Returns:
- * false if ack received ok
- * true if no ack received or other error
- *
- * Parms:
- * dev The device structure containing
- * The io address and interrupt count
- * for this device.
- * phy The address of the PHY to be queried.
- * reg The register whose contents are to be
- * retrieved.
- * val A pointer to a variable to store the
- * retrieved value.
- *
- * This function uses the TLAN's MII bus to retrieve the contents
- * of a given register on a PHY. It sends the appropriate info
- * and then reads the 16-bit register value from the MII bus via
- * the TLAN SIO register.
- *
- **************************************************************/
-
-static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+/***************************************************************
+ * tlan_mii_read_reg
+ *
+ * Returns:
+ * false if ack received ok
+ * true if no ack received or other error
+ *
+ * Parms:
+ * dev The device structure containing
+ * the IO address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+ * retrieved.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+ * This function uses the TLAN's MII bus to retrieve the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 nack;
u16 sio, tmp;
- u32 i;
+ u32 i;
bool err;
int minten;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
err = false;
@@ -2892,48 +2987,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- TLan_MiiSync(dev->base_addr);
+ tlan_mii_sync(dev->base_addr);
- minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
- if ( minten )
- TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
- TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */
- TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
- TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
- TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
- nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */
- if (nack) { /* No ACK, so fake it */
+ nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
+ if (nack) { /* no ACK, so fake it */
for (i = 0; i < 16; i++) {
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
tmp = 0xffff;
err = true;
} else { /* ACK, so read data */
for (tmp = 0, i = 0x8000; i; i >>= 1) {
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
- if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
tmp |= i;
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
}
- TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */
- TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
- if ( minten )
- TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
*val = tmp;
@@ -2942,116 +3037,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val
return err;
-} /* TLan_MiiReadReg */
+}
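Typical use, matching the calls earlier in this patch: poll the PHY status register twice (presumably because the link bit is latched) and test the link flag:

        u16 status;

        tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
        udelay(1000);
        tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
        if (status & MII_GS_LINK)       /* link is up */
                netif_carrier_on(dev);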
- /***************************************************************
- * TLan_MiiSendData
- *
- * Returns:
- * Nothing
- * Parms:
- * base_port The base IO port of the adapter in
- * question.
- * dev The address of the PHY to be queried.
- * data The value to be placed on the MII bus.
- * num_bits The number of bits in data that are to
- * be placed on the MII bus.
- *
- * This function sends on sequence of bits on the MII
- * configuration bus.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_send_data
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * dev The address of the PHY to be queried.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+ * This function sends a sequence of bits over the MII
+ * configuration bus.
+ *
+ **************************************************************/
-static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
{
u16 sio;
u32 i;
- if ( num_bits == 0 )
+ if (num_bits == 0)
return;
- outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+ tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
- for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
- (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
- if ( data & i )
- TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+ for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ if (data & i)
+ tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
else
- TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
- (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
}
-} /* TLan_MiiSendData */
+}
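Laid end to end, the calls in tlan_mii_read_reg() above spell out a standard MII management read frame; shown here only as annotation of that sequence:

        tlan_mii_send_data(base_port, 0x1, 2);  /* start:  01b        */
        tlan_mii_send_data(base_port, 0x2, 2);  /* opcode: 10b = read */
        tlan_mii_send_data(base_port, phy, 5);  /* PHY address        */
        tlan_mii_send_data(base_port, reg, 5);  /* register address   */
        /* then the turnaround: MTXEN is cleared and the ACK plus
           16 data bits are clocked in from MDATA */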
- /***************************************************************
- * TLan_MiiSync
- *
- * Returns:
- * Nothing
- * Parms:
- * base_port The base IO port of the adapter in
- * question.
- *
- * This functions syncs all PHYs in terms of the MII configuration
- * bus.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_sync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+ * This function syncs all PHYs in terms of the MII configuration
+ * bus.
+ *
+ **************************************************************/
-static void TLan_MiiSync( u16 base_port )
+static void tlan_mii_sync(u16 base_port)
{
int i;
u16 sio;
- outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
- for ( i = 0; i < 32; i++ ) {
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+ for (i = 0; i < 32; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
}
-} /* TLan_MiiSync */
+}
- /***************************************************************
- * TLan_MiiWriteReg
- *
- * Returns:
- * Nothing
- * Parms:
- * dev The device structure for the device
- * to write to.
- * phy The address of the PHY to be written to.
- * reg The register whose contents are to be
- * written.
- * val The value to be written to the register.
- *
- * This function uses the TLAN's MII bus to write the contents of a
- * given register on a PHY. It sends the appropriate info and then
- * writes the 16-bit register value from the MII configuration bus
- * via the TLAN SIO register.
- *
- **************************************************************/
+/***************************************************************
+ * tlan_mii_write_reg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+ * writes the 16-bit register value from the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
-static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
{
u16 sio;
int minten;
unsigned long flags = 0;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3155,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
if (!in_irq())
spin_lock_irqsave(&priv->lock, flags);
- TLan_MiiSync( dev->base_addr );
+ tlan_mii_sync(dev->base_addr);
- minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
- if ( minten )
- TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */
- TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */
- TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */
- TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
- TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */
- TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
+ tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
- TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */
- TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
- if ( minten )
- TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
-} /* TLan_MiiWriteReg */
+}
@@ -3090,229 +3186,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val
/*****************************************************************************
******************************************************************************
- ThunderLAN Driver Eeprom routines
+ThunderLAN driver EEPROM routines
- The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
- EEPROM. These functions are based on information in Microchip's
- data sheet. I don't know how well this functions will work with
- other EEPROMs.
+The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+EEPROM. These functions are based on information in Microchip's
+data sheet. I don't know how well these functions will work with
+other EEPROMs.
******************************************************************************
*****************************************************************************/
- /***************************************************************
- * TLan_EeSendStart
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- *
- * This function sends a start cycle to an EEPROM attached
- * to a TLAN chip.
- *
- **************************************************************/
-
-static void TLan_EeSendStart( u16 io_base )
+/***************************************************************
+ * tlan_ee_send_start
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
{
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-
-} /* TLan_EeSendStart */
-
-
-
-
- /***************************************************************
- * TLan_EeSendByte
- *
- * Returns:
- * If the correct ack was received, 0, otherwise 1
- * Parms: io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * data The 8 bits of information to
- * send to the EEPROM.
- * stop If TLAN_EEPROM_STOP is passed, a
- * stop cycle is sent after the
- * byte is sent after the ack is
- * read.
- *
- * This function sends a byte on the serial EEPROM line,
- * driving the clock to send each bit. The function then
- * reverses transmission direction and reads an acknowledge
- * bit.
- *
- **************************************************************/
-
-static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_send_byte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms: io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte, once the ack has been
+ * read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
{
int err;
u8 place;
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
/* Assume clock is low, tx is enabled; */
- for ( place = 0x80; place != 0; place >>= 1 ) {
- if ( place & data )
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ for (place = 0x80; place != 0; place >>= 1) {
+ if (place & data)
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
else
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
- TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
- if ( ( ! err ) && stop ) {
+ if ((!err) && stop) {
/* STOP, raise data while clock is high */
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
}
return err;
-} /* TLan_EeSendByte */
-
-
-
-
- /***************************************************************
- * TLan_EeReceiveByte
- *
- * Returns:
- * Nothing
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * data An address to a char to hold the
- * data sent from the EEPROM.
- * stop If TLAN_EEPROM_STOP is passed, a
- * stop cycle is sent after the
- * byte is received, and no ack is
- * sent.
- *
- * This function receives 8 bits of data from the EEPROM
- * over the serial link. It then sends and ack bit, or no
- * ack and a stop bit. This function is used to retrieve
- * data after the address of a byte in the EEPROM has been
- * sent.
- *
- **************************************************************/
-
-static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_receive_byte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+ * over the serial link. It then sends an ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
{
u8 place;
u16 sio;
- outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
*data = 0;
/* Assume clock is low, tx is enabled; */
- TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
- for ( place = 0x80; place; place >>= 1 ) {
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ for (place = 0x80; place; place >>= 1) {
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
*data |= place;
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
}
- TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
- if ( ! stop ) {
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ if (!stop) {
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
} else {
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
/* STOP, raise data while clock is high */
- TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
- TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
- TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
- }
-
-} /* TLan_EeReceiveByte */
-
-
-
-
- /***************************************************************
- * TLan_EeReadByte
- *
- * Returns:
- * No error = 0, else, the stage at which the error
- * occurred.
- * Parms:
- * io_base The IO port base address for the
- * TLAN device with the EEPROM to
- * use.
- * ee_addr The address of the byte in the
- * EEPROM whose contents are to be
- * retrieved.
- * data An address to a char to hold the
- * data obtained from the EEPROM.
- *
- * This function reads a byte of information from an byte
- * cell in the EEPROM.
- *
- **************************************************************/
-
-static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_read_byte
+ *
+ * Returns:
+ * No error = 0, else, the stage at which the error
+ * occurred.
+ * Parms:
+ * dev The device structure for the
+ * TLAN device whose EEPROM is
+ * to be read.
+ * ee_addr The address of the byte in the
+ * EEPROM whose contents are to be
+ * retrieved.
+ * data An address to a char to hold the
+ * data obtained from the EEPROM.
+ *
+ * This function reads a byte of information from a byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
{
int err;
- TLanPrivateInfo *priv = netdev_priv(dev);
+ struct tlan_priv *priv = netdev_priv(dev);
unsigned long flags = 0;
- int ret=0;
+ int ret = 0;
spin_lock_irqsave(&priv->lock, flags);
- TLan_EeSendStart( dev->base_addr );
- err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=1;
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 1;
goto fail;
}
- err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=2;
+ err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 2;
goto fail;
}
- TLan_EeSendStart( dev->base_addr );
- err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
- if (err)
- {
- ret=3;
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 3;
goto fail;
}
- TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+ tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
fail:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
-} /* TLan_EeReadByte */
+}
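
The helpers above implement the 24C02A random-read handshake end to end:
START, device address 0xa0 (write), byte address, repeated START, device
address 0xa1 (read), then one data byte closed with a STOP. A minimal usage
sketch follows - illustrative only, not the driver's verbatim probe path;
tlan_read_mac_sketch is a hypothetical helper, with addr_ofs standing in for
tlan_adapter_entry.addr_ofs:

/* Sketch: pull the 6-byte station address out of the EEPROM with the
 * renamed helper. tlan_ee_read_byte() returns 0 on success, else the
 * stage (1, 2 or 3) at which the transaction failed. */
static int tlan_read_mac_sketch(struct net_device *dev, u8 addr_ofs)
{
	u8 byte;
	int i, err;

	for (i = 0; i < 6; i++) {
		err = tlan_ee_read_byte(dev, addr_ofs + i, &byte);
		if (err)
			return err;
		dev->dev_addr[i] = byte;
	}
	return 0;
}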
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 3315ced..5fc98a8 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -20,8 +20,8 @@
********************************************************************/
-#include <asm/io.h>
-#include <asm/types.h>
+#include <linux/io.h>
+#include <linux/types.h>
#include <linux/netdevice.h>
@@ -40,8 +40,11 @@
#define TLAN_IGNORE 0
#define TLAN_RECORD 1
-#define TLAN_DBG(lvl, format, args...) \
- do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0)
+#define TLAN_DBG(lvl, format, args...) \
+ do { \
+ if (debug&lvl) \
+ printk(KERN_DEBUG "TLAN: " format, ##args); \
+ } while (0)
#define TLAN_DEBUG_GNRL 0x0001
#define TLAN_DEBUG_TX 0x0002
@@ -50,7 +53,8 @@
#define TLAN_DEBUG_PROBE 0x0010
#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
-#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */
+#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
+ at a time */
/*****************************************************************
@@ -70,13 +74,13 @@
#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
#endif
-typedef struct tlan_adapter_entry {
- u16 vendorId;
- u16 deviceId;
- char *deviceLabel;
+struct tlan_adapter_entry {
+ u16 vendor_id;
+ u16 device_id;
+ char *device_label;
u32 flags;
- u16 addrOfs;
-} TLanAdapterEntry;
+ u16 addr_ofs;
+};
#define TLAN_ADAPTER_NONE 0x00000000
#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
@@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry {
#define TLAN_CSTAT_DP_PR 0x0100
-typedef struct tlan_buffer_ref_tag {
+struct tlan_buffer {
u32 count;
u32 address;
-} TLanBufferRef;
+};
-typedef struct tlan_list_tag {
+struct tlan_list {
u32 forward;
- u16 cStat;
- u16 frameSize;
- TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST];
-} TLanList;
+ u16 c_stat;
+ u16 frame_size;
+ struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
*
****************************************************************/
-typedef struct tlan_private_tag {
- struct net_device *nextDevice;
- struct pci_dev *pciDev;
+struct tlan_priv {
+ struct net_device *next_device;
+ struct pci_dev *pci_dev;
struct net_device *dev;
- void *dmaStorage;
- dma_addr_t dmaStorageDMA;
- unsigned int dmaSize;
- u8 *padBuffer;
- TLanList *rxList;
- dma_addr_t rxListDMA;
- u8 *rxBuffer;
- dma_addr_t rxBufferDMA;
- u32 rxHead;
- u32 rxTail;
- u32 rxEocCount;
- TLanList *txList;
- dma_addr_t txListDMA;
- u8 *txBuffer;
- dma_addr_t txBufferDMA;
- u32 txHead;
- u32 txInProgress;
- u32 txTail;
- u32 txBusyCount;
- u32 phyOnline;
- u32 timerSetAt;
- u32 timerType;
+ void *dma_storage;
+ dma_addr_t dma_storage_dma;
+ unsigned int dma_size;
+ u8 *pad_buffer;
+ struct tlan_list *rx_list;
+ dma_addr_t rx_list_dma;
+ u8 *rx_buffer;
+ dma_addr_t rx_buffer_dma;
+ u32 rx_head;
+ u32 rx_tail;
+ u32 rx_eoc_count;
+ struct tlan_list *tx_list;
+ dma_addr_t tx_list_dma;
+ u8 *tx_buffer;
+ dma_addr_t tx_buffer_dma;
+ u32 tx_head;
+ u32 tx_in_progress;
+ u32 tx_tail;
+ u32 tx_busy_count;
+ u32 phy_online;
+ u32 timer_set_at;
+ u32 timer_type;
struct timer_list timer;
struct board *adapter;
- u32 adapterRev;
+ u32 adapter_rev;
u32 aui;
u32 debug;
u32 duplex;
u32 phy[2];
- u32 phyNum;
+ u32 phy_num;
u32 speed;
- u8 tlanRev;
- u8 tlanFullDuplex;
+ u8 tlan_rev;
+ u8 tlan_full_duplex;
spinlock_t lock;
u8 link;
u8 is_eisa;
struct work_struct tlan_tqueue;
u8 neg_be_verbose;
-} TLanPrivateInfo;
+};
@@ -247,7 +251,7 @@ typedef struct tlan_private_tag {
****************************************************************/
#define TLAN_HOST_CMD 0x00
-#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_GO 0x80000000
#define TLAN_HC_STOP 0x40000000
#define TLAN_HC_ACK 0x20000000
#define TLAN_HC_CS_MASK 0x1FE00000
@@ -283,7 +287,7 @@ typedef struct tlan_private_tag {
#define TLAN_NET_CMD_TRFRAM 0x02
#define TLAN_NET_CMD_TXPACE 0x01
#define TLAN_NET_SIO 0x01
-#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_MINTEN 0x80
#define TLAN_NET_SIO_ECLOK 0x40
#define TLAN_NET_SIO_ETXEN 0x20
#define TLAN_NET_SIO_EDATA 0x10
@@ -304,7 +308,7 @@ typedef struct tlan_private_tag {
#define TLAN_NET_MASK_MASK4 0x10
#define TLAN_NET_MASK_RSRVD 0x0F
#define TLAN_NET_CONFIG 0x04
-#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_RCLK 0x8000
#define TLAN_NET_CFG_TCLK 0x4000
#define TLAN_NET_CFG_BIT 0x2000
#define TLAN_NET_CFG_RXCRC 0x1000
@@ -372,7 +376,7 @@ typedef struct tlan_private_tag {
/* Generic MII/PHY Registers */
#define MII_GEN_CTL 0x00
-#define MII_GC_RESET 0x8000
+#define MII_GC_RESET 0x8000
#define MII_GC_LOOPBK 0x4000
#define MII_GC_SPEEDSEL 0x2000
#define MII_GC_AUTOENB 0x1000
@@ -397,9 +401,9 @@ typedef struct tlan_private_tag {
#define MII_GS_EXTCAP 0x0001
#define MII_GEN_ID_HI 0x02
#define MII_GEN_ID_LO 0x03
-#define MII_GIL_OUI 0xFC00
-#define MII_GIL_MODEL 0x03F0
-#define MII_GIL_REVISION 0x000F
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
#define MII_AN_ADV 0x04
#define MII_AN_LPA 0x05
#define MII_AN_EXP 0x06
@@ -408,7 +412,7 @@ typedef struct tlan_private_tag {
#define TLAN_TLPHY_ID 0x10
#define TLAN_TLPHY_CTL 0x11
-#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_IGLINK 0x8000
#define TLAN_TC_SWAPOL 0x4000
#define TLAN_TC_AUISEL 0x2000
#define TLAN_TC_SQEEN 0x1000
@@ -435,41 +439,41 @@ typedef struct tlan_private_tag {
#define LEVEL1_ID1 0x7810
#define LEVEL1_ID2 0x0000
-#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0
+#define CIRC_INC(a, b) if (++a >= b) a = 0
/* Routines to access internal registers. */
-static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
-} /* TLan_DioRead8 */
+}
-static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
-} /* TLan_DioRead16 */
+}
-static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
return inl(base_addr + TLAN_DIO_DATA);
-} /* TLan_DioRead32 */
+}
-static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
-static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
-static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
{
outw(internal_addr, base_addr + TLAN_DIO_ADR);
outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
}
-#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port)
-#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit))
-#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port)
+#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
/*
* given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
*
* The original code was:
*
- * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); }
+ * u32 xor(u32 a, u32 b) { return ((a && !b) || (!a && b)); }
*
- * #define XOR8( a, b, c, d, e, f, g, h ) \
- * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
- * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
+ * #define XOR8(a, b, c, d, e, f, g, h) \
+ * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)))))))
+ * #define DA(a, bit) (((u8) a[bit/8]) & ((u8) (1 << bit%8)))
*
- * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
- * DA(a,30), DA(a,36), DA(a,42) );
- * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
- * DA(a,31), DA(a,37), DA(a,43) ) << 1;
- * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
- * DA(a,32), DA(a,38), DA(a,44) ) << 2;
- * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
- * DA(a,33), DA(a,39), DA(a,45) ) << 3;
- * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
- * DA(a,34), DA(a,40), DA(a,46) ) << 4;
- * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
- * DA(a,35), DA(a,41), DA(a,47) ) << 5;
+ * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ * DA(a,30), DA(a,36), DA(a,42));
+ * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ * DA(a,31), DA(a,37), DA(a,43)) << 1;
+ * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ * DA(a,32), DA(a,38), DA(a,44)) << 2;
+ * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ * DA(a,33), DA(a,39), DA(a,45)) << 3;
+ * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ * DA(a,34), DA(a,40), DA(a,46)) << 4;
+ * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ * DA(a,35), DA(a,41), DA(a,47)) << 5;
*
*/
-static inline u32 TLan_HashFunc( const u8 *a )
+static inline u32 tlan_hash_func(const u8 *a)
{
- u8 hash;
+ u8 hash;
- hash = (a[0]^a[3]); /* & 077 */
- hash ^= ((a[0]^a[3])>>6); /* & 003 */
- hash ^= ((a[1]^a[4])<<2); /* & 074 */
- hash ^= ((a[1]^a[4])>>4); /* & 017 */
- hash ^= ((a[2]^a[5])<<4); /* & 060 */
- hash ^= ((a[2]^a[5])>>2); /* & 077 */
+ hash = (a[0]^a[3]); /* & 077 */
+ hash ^= ((a[0]^a[3])>>6); /* & 003 */
+ hash ^= ((a[1]^a[4])<<2); /* & 074 */
+ hash ^= ((a[1]^a[4])>>4); /* & 017 */
+ hash ^= ((a[2]^a[5])<<4); /* & 060 */
+ hash ^= ((a[2]^a[5])>>2); /* & 077 */
- return hash & 077;
+ return hash & 077;
}
#endif
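
The hash comment above can be checked stand-alone: the six XOR lines are an
unrolled version of the XOR8/DA construction, folding the address byte pairs
into one 6-bit filter index. A self-contained userspace rendition (not driver
code) for one sample multicast address:

#include <stdio.h>

/* Same arithmetic as tlan_hash_func(), outside the kernel. */
static unsigned int hash6(const unsigned char *a)
{
	unsigned char hash;

	hash  = (a[0] ^ a[3]);		/* & 077 */
	hash ^= ((a[0] ^ a[3]) >> 6);	/* & 003 */
	hash ^= ((a[1] ^ a[4]) << 2);	/* & 074 */
	hash ^= ((a[1] ^ a[4]) >> 4);	/* & 017 */
	hash ^= ((a[2] ^ a[5]) << 4);	/* & 060 */
	hash ^= ((a[2] ^ a[5]) >> 2);	/* & 077 */
	return hash & 077;
}

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* prints 38: the bit this address selects in the 64-bit filter */
	printf("hash = %u\n", hash6(mac));
	return 0;
}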
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b100bd5..55786a0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1142,7 +1142,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
* privs required. */
static int set_offload(struct net_device *dev, unsigned long arg)
{
- unsigned int old_features, features;
+ u32 old_features, features;
old_features = dev->features;
/* Unset features, set them as we chew on the arg. */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index a3c46f6..7fa5ec2 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32;
#include <linux/in6.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
-#include <generated/utsrelease.h>
#include "typhoon.h"
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION(UTS_RELEASE);
+MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index cc83fa7..105d7f0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (tb[IFLA_ADDRESS] == NULL)
random_ether_addr(dev->dev_addr);
- if (tb[IFLA_IFNAME])
- nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
- else
- snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
-
- if (strchr(dev->name, '%')) {
- err = dev_alloc_name(dev, dev->name);
- if (err < 0)
- goto err_alloc_name;
- }
-
err = register_netdevice(dev);
if (err < 0)
goto err_register_dev;
@@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
err_register_dev:
/* nothing to do */
-err_alloc_name:
err_configure_peer:
unregister_netdevice(peer);
return err;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 09cac70..0d6fec6 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
static int velocity_set_wol(struct velocity_info *vptr)
{
struct mac_regs __iomem *regs = vptr->mac_regs;
+ enum speed_opt spd_dpx = vptr->options.spd_dpx;
static u8 buf[256];
int i;
@@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr)
writew(0x0FFF, &regs->WOLSRClr);
+ if (spd_dpx == SPD_DPX_1000_FULL)
+ goto mac_done;
+
+ if (spd_dpx != SPD_DPX_AUTO)
+ goto advertise_done;
+
if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
if (vptr->mii_status & VELOCITY_SPEED_1000)
MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
+advertise_done:
BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
{
@@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr)
writeb(GCR, &regs->CHIPGCR);
}
+mac_done:
BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
/* Turn on SWPTAG just before entering power mode */
BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa2e69b..d722753 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -361,7 +361,7 @@ enum velocity_owner {
#define MAC_REG_CHIPGSR 0x9C
#define MAC_REG_TESTCFG 0x9D
#define MAC_REG_DEBUG 0x9E
-#define MAC_REG_CHIPGCR 0x9F
+#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */
#define MAC_REG_WOLCR0_SET 0xA0
#define MAC_REG_WOLCR1_SET 0xA1
#define MAC_REG_PWCFG_SET 0xA2
@@ -848,10 +848,10 @@ enum velocity_owner {
* Bits in CHIPGCR register
*/
-#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */
-#define CHIPGCR_FCFDX 0x40
+#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */
+#define CHIPGCR_FCFDX 0x40 /* force full duplex */
#define CHIPGCR_FCRESV 0x20
-#define CHIPGCR_FCMODE 0x10
+#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */
#define CHIPGCR_LPSOPT 0x08
#define CHIPGCR_TM1US 0x04
#define CHIPGCR_TM0US 0x02
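
The CHIPGCR annotations above spell out the MAC forced-mode controls the
reworked WOL path depends on: with FCMODE set, the MAC ignores the PHY's
autonegotiation result and runs at whatever FCGMII/FCFDX select. A condensed
sketch of a forced gigabit-full setup (illustrative fragment, not the
driver's exact suspend sequence; 'regs' is the mapped mac_regs):

	u8 gcr = readb(&regs->CHIPGCR);

	gcr |= CHIPGCR_FCGMII;	/* keep GMII, else the MAC falls back to MII */
	gcr |= CHIPGCR_FCFDX;	/* force full duplex */
	gcr |= CHIPGCR_FCMODE;	/* MAC forced mode overrides the PHY result */
	writeb(gcr, &regs->CHIPGCR);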
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 228d4f7..e74e4b4 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
data1 = steer_ctrl = 0;
status = vxge_hw_vpath_fw_api(vpath,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
VXGE_HW_FW_API_GET_EPROM_REV,
+ VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
0, &data0, &data1, &steer_ctrl);
if (status != VXGE_HW_OK)
break;
@@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
ring->rxd_init = attr->rxd_init;
ring->rxd_term = attr->rxd_term;
ring->buffer_mode = config->buffer_mode;
+ ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+ ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
ring->rxds_limit = config->rxds_limit;
ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
/* apply "interrupts per txdl" attribute */
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+ fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+ fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
if (fifo->config->intr)
fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg1_saved = val64;
+
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ vpath->tim_tti_cfg3_saved = val64;
}
if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg1_saved = val64;
+
val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
}
writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ vpath->tim_rti_cfg3_saved = val64;
}
val64 = 0;
@@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
return status;
}
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
- struct __vxge_hw_virtualpath *vpath;
- struct vxge_hw_vpath_reg __iomem *vp_reg;
- struct vxge_hw_vp_config *config;
- u64 val64;
-
- vpath = &hldev->virtual_paths[vp_id];
- vp_reg = vpath->vp_reg;
- config = vpath->vp_config;
-
- if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
- config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
- config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
- val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
- writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
- }
-}
-
/*
* __vxge_hw_vpath_initialize
* This routine is the final phase of init which initializes the
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e249e28..3c53aa7 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath {
u32 vsport_number;
u32 max_kdfc_db;
u32 max_nofl_db;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
struct __vxge_hw_ring *____cacheline_aligned ringh;
struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@ struct __vxge_hw_ring {
u32 doorbell_cnt;
u32 total_db_cnt;
u64 rxds_limit;
+ u32 rtimer;
+ u64 tim_rti_cfg1_saved;
+ u64 tim_rti_cfg3_saved;
enum vxge_hw_status (*callback)(
struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo {
u32 per_txdl_space;
u32 vp_id;
u32 tx_intr_num;
+ u32 rtimer;
+ u64 tim_tti_cfg1_saved;
+ u64 tim_tti_cfg3_saved;
enum vxge_hw_status (*callback)(
struct __vxge_hw_fifo *fifo_handle,
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c81a651..e40f619 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
struct vxge_hw_ring_rxd_info ext_info;
vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
ring->ndev->name, __func__, __LINE__);
- ring->pkts_processed = 0;
-
- vxge_hw_ring_replenish(ringh);
do {
prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
return ret;
}
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+ int i = 0;
+
+ /* Enable CI for RTI */
+ if (vdev->config.intr_type == MSI_X) {
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_ring *hw_ring;
+
+ hw_ring = vdev->vpaths[i].ring.handle;
+ vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+ }
+ }
+
+ /* Enable CI for TTI */
+ for (i = 0; i < vdev->no_of_vpath; i++) {
+ struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+ vxge_hw_vpath_tti_ci_set(hw_fifo);
+ /*
+ * For INTA (with or without NAPI), set CI ON for only one
+ * vpath (there is only one free-running timer).
+ */
+ if ((vdev->config.intr_type == INTA) && (i == 0))
+ break;
+ }
+
+ return;
+}
+
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
netif_tx_wake_all_queues(vdev->ndev);
}
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
out:
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work)
*/
static int vxge_poll_msix(struct napi_struct *napi, int budget)
{
- struct vxge_ring *ring =
- container_of(napi, struct vxge_ring, napi);
+ struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+ int pkts_processed;
int budget_org = budget;
- ring->budget = budget;
+ ring->budget = budget;
+ ring->pkts_processed = 0;
vxge_hw_vpath_poll_rx(ring->handle);
+ pkts_processed = ring->pkts_processed;
if (ring->pkts_processed < budget_org) {
napi_complete(napi);
+
/* Re enable the Rx interrupts for the vpath */
vxge_hw_channel_msix_unmask(
(struct __vxge_hw_channel *)ring->handle,
ring->rx_vector_no);
+ mmiowb();
}
- return ring->pkts_processed;
+ /* We are copying and returning the local variable, in case the
+ * interrupt fires right away after the msix interrupt is cleared
+ * above, which could preempt this NAPI thread */
+ return pkts_processed;
}
static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
for (i = 0; i < vdev->no_of_vpath; i++) {
ring = &vdev->vpaths[i].ring;
ring->budget = budget;
+ ring->pkts_processed = 0;
vxge_hw_vpath_poll_rx(ring->handle);
pkts_processed += ring->pkts_processed;
budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
netdev_get_tx_queue(vdev->ndev, 0);
vpath->fifo.indicate_max_pkts =
vdev->config.fifo_indicate_max_pkts;
+ vpath->fifo.tx_vector_no = 0;
vpath->ring.rx_vector_no = 0;
vpath->ring.rx_csum = vdev->rx_csum;
vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
return VXGE_HW_OK;
}
+/**
+ * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @fifo: pointer to transmit fifo structure
+ * Description: The function adjusts the restriction timer value
+ * depending on the traffic
+ * Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+ fifo->interrupt_count++;
+ if (jiffies > fifo->jiffies + HZ / 100) {
+ struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+ fifo->jiffies = jiffies;
+ if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+ hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+ hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ } else if (hw_fifo->rtimer != 0) {
+ hw_fifo->rtimer = 0;
+ vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+ }
+ fifo->interrupt_count = 0;
+ }
+}
+
+/**
+ * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ * if the interrupts are not within a range
+ * @ring: pointer to receive ring structure
+ * Description: The function raises or clears the restriction timer,
+ * depending on traffic utilization, when the interrupt rate for this
+ * ring is not within a fixed range.
+ * Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+ ring->interrupt_count++;
+ if (jiffies > ring->jiffies + HZ / 100) {
+ struct __vxge_hw_ring *hw_ring = ring->handle;
+
+ ring->jiffies = jiffies;
+ if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+ hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+ hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ } else if (hw_ring->rtimer != 0) {
+ hw_ring->rtimer = 0;
+ vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+ }
+ ring->interrupt_count = 0;
+ }
+}
+
/*
* vxge_isr_napi
* @irq: the irq of the device.
@@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
#ifdef CONFIG_PCI_MSI
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
{
struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
+ adaptive_coalesce_tx_interrupts(fifo);
+
+ vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
VXGE_COMPLETE_VPATH_TX(fifo);
+ vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+ fifo->tx_vector_no);
+
+ mmiowb();
+
return IRQ_HANDLED;
}
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
{
struct vxge_ring *ring = (struct vxge_ring *)dev_id;
- /* MSIX_IDX for Rx is 1 */
+ adaptive_coalesce_rx_interrupts(ring);
+
vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
- ring->rx_vector_no);
+ ring->rx_vector_no);
+
+ vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+ ring->rx_vector_no);
napi_schedule(&ring->napi);
return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
for (i = 0; i < vdev->no_of_vpath; i++) {
+ /* Reduce the chance of losing alarm interrupts by masking
+ * the vector. A pending bit will be set if an alarm is
+ * generated and on unmask the interrupt will be fired.
+ */
vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+ vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+ mmiowb();
status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
vdev->exec_mode);
if (status == VXGE_HW_OK) {
-
vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
- msix_id);
+ msix_id);
+ mmiowb();
continue;
}
vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev)
vpath->ring.rx_vector_no = (vpath->device_id *
VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
+ vpath->fifo.tx_vector_no = (vpath->device_id *
+ VXGE_HW_VPATH_MSIX_ACTIVE);
+
vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
VXGE_ALARM_MSIX_ID);
}
@@ -2474,8 +2592,9 @@ INTA_MODE:
"%s:vxge:INTA", vdev->ndev->name);
vxge_hw_device_set_intr_type(vdev->devh,
VXGE_HW_INTR_MODE_IRQLINE);
- vxge_hw_vpath_tti_ci_set(vdev->devh,
- vdev->vpaths[0].device_id);
+
+ vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
ret = request_irq((int) vdev->pdev->irq,
vxge_isr_napi,
IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev)
}
netif_tx_start_all_queues(vdev->ndev);
+
+ /* configure CI */
+ vxge_config_ci_for_tti_rti(vdev);
+
goto out0;
out2:
@@ -3348,7 +3471,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vxge_debug_init(VXGE_ERR,
"%s: vpath memory allocation failed",
vdev->ndev->name);
- ret = -ENODEV;
+ ret = -ENOMEM;
goto _out1;
}
@@ -3369,11 +3492,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
if (vdev->config.gro_enable)
ndev->features |= NETIF_F_GRO;
- if (register_netdev(ndev)) {
+ ret = register_netdev(ndev);
+ if (ret) {
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s: %s : device registration failed!",
ndev->name, __func__);
- ret = -ENODEV;
goto _out2;
}
@@ -3444,6 +3567,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
/* in 2.6 will call stop() if device is up */
unregister_netdev(dev);
+ kfree(vdev->vpaths);
+
+ /* we are safe to free it now */
+ free_netdev(dev);
+
vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
buf);
vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
@@ -3799,7 +3927,7 @@ static void __devinit vxge_device_config_init(
break;
case MSI_X:
- device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
+ device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
break;
}
@@ -4335,10 +4463,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit1;
}
- if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
+ ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
+ if (ret) {
vxge_debug_init(VXGE_ERR,
"%s : request regions failed", __func__);
- ret = -ENODEV;
goto _exit1;
}
@@ -4446,7 +4574,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
if (!img[i].is_valid)
break;
vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
- "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+ "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
VXGE_EPROM_IMG_MAJOR(img[i].version),
VXGE_EPROM_IMG_MINOR(img[i].version),
VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4771,9 @@ _exit6:
_exit5:
vxge_device_unregister(hldev);
_exit4:
- pci_disable_sriov(pdev);
+ pci_set_drvdata(pdev, NULL);
vxge_hw_device_terminate(hldev);
+ pci_disable_sriov(pdev);
_exit3:
iounmap(attr.bar0);
_exit2:
@@ -4655,7 +4784,7 @@ _exit0:
kfree(ll_config);
kfree(device_config);
driver_config->config_dev_cnt--;
- pci_set_drvdata(pdev, NULL);
+ driver_config->total_dev_cnt--;
return ret;
}
@@ -4668,45 +4797,34 @@ _exit0:
static void __devexit vxge_remove(struct pci_dev *pdev)
{
struct __vxge_hw_device *hldev;
- struct vxgedev *vdev = NULL;
- struct net_device *dev;
- int i = 0;
+ struct vxgedev *vdev;
+ int i;
hldev = pci_get_drvdata(pdev);
-
if (hldev == NULL)
return;
- dev = hldev->ndev;
- vdev = netdev_priv(dev);
+ vdev = netdev_priv(hldev->ndev);
vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
-
vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
__func__);
- vxge_device_unregister(hldev);
- for (i = 0; i < vdev->no_of_vpath; i++) {
+ for (i = 0; i < vdev->no_of_vpath; i++)
vxge_free_mac_add_list(&vdev->vpaths[i]);
- vdev->vpaths[i].mcast_addr_cnt = 0;
- vdev->vpaths[i].mac_addr_cnt = 0;
- }
-
- kfree(vdev->vpaths);
+ vxge_device_unregister(hldev);
+ pci_set_drvdata(pdev, NULL);
+ /* Do not call pci_disable_sriov here, as it will break child devices */
+ vxge_hw_device_terminate(hldev);
iounmap(vdev->bar0);
-
- /* we are safe to free it now */
- free_netdev(dev);
+ pci_release_region(pdev, 0);
+ pci_disable_device(pdev);
+ driver_config->config_dev_cnt--;
+ driver_config->total_dev_cnt--;
vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
__func__, __LINE__);
-
- vxge_hw_device_terminate(hldev);
-
- pci_disable_device(pdev);
- pci_release_region(pdev, 0);
- pci_set_drvdata(pdev, NULL);
vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
__LINE__);
}
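
All of the reworked MSI-X handlers above follow the same one-shot service
pattern; condensed (illustrative fragment with the fifo/ring specifics
stripped out, 'channel' and 'vector_no' standing in for the per-queue
handles):

	vxge_hw_channel_msix_mask(channel, vector_no);
	vxge_hw_channel_msix_clear(channel, vector_no);	/* ack one-shot pending bit */
	/* service completions: VXGE_COMPLETE_VPATH_TX() or napi_schedule() */
	vxge_hw_channel_msix_unmask(channel, vector_no);
	mmiowb();	/* flush the posted unmask before the handler returns */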
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 5746fed..40474f0 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -59,11 +59,13 @@
#define VXGE_TTI_LTIMER_VAL 1000
#define VXGE_T1A_TTI_LTIMER_VAL 80
#define VXGE_TTI_RTIMER_VAL 0
+#define VXGE_TTI_RTIMER_ADAPT_VAL 10
#define VXGE_T1A_TTI_RTIMER_VAL 400
#define VXGE_RTI_BTIMER_VAL 250
#define VXGE_RTI_LTIMER_VAL 100
#define VXGE_RTI_RTIMER_VAL 0
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_RTI_RTIMER_ADAPT_VAL 15
+#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
#define VXGE_ISR_POLLING_CNT 8
#define VXGE_MAX_CONFIG_DEV 0xFF
#define VXGE_EXEC_MODE_DISABLE 0
@@ -107,6 +109,14 @@
#define RTI_T1A_RX_UFC_C 50
#define RTI_T1A_RX_UFC_D 60
+/*
+ * The moderation parameters keep the interrupt rate near 3k per second
+ * for most traffic, but not all. The counts below are the maximum
+ * number of interrupts allowed per function with INTA, or per vector
+ * with MSI-X, in a 10 millisecond period. Enabled only for Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT 100
+#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200
/* Milli secs timer period */
#define VXGE_TIMER_DELAY 10000
@@ -247,6 +257,11 @@ struct vxge_fifo {
int tx_steering_type;
int indicate_max_pkts;
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
+ u32 tx_vector_no;
/* Tx stats */
struct vxge_fifo_stats stats;
} ____cacheline_aligned;
@@ -271,6 +286,10 @@ struct vxge_ring {
*/
int driver_id;
+ /* Adaptive interrupt moderation parameters used in T1A */
+ unsigned long interrupt_count;
+ unsigned long jiffies;
+
/* copy of the flag indicating whether rx_csum is to be used */
u32 rx_csum:1,
rx_hwts:1;
@@ -286,7 +305,7 @@ struct vxge_ring {
int vlan_tag_strip;
struct vlan_group *vlgrp;
- int rx_vector_no;
+ u32 rx_vector_no;
enum vxge_hw_status last_status;
/* Rx stats */
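
For reference, the adaptive handlers sample interrupt_count over an HZ/100
window, so the two counts above translate into per-second trigger rates as
follows (illustrative arithmetic; VXGE_ADAPT_WINDOW_MS is a made-up name for
the 10 ms window):

	#define VXGE_ADAPT_WINDOW_MS	10	/* HZ / 100 */

	unsigned int rx_rate = VXGE_T1A_MAX_INTERRUPT_COUNT *
			       (1000 / VXGE_ADAPT_WINDOW_MS);	/* 10000/s */
	unsigned int tx_rate = VXGE_T1A_MAX_TX_INTERRUPT_COUNT *
			       (1000 / VXGE_ADAPT_WINDOW_MS);	/* 20000/s */

Above those rates the restriction timer is raised to back the vector off;
below them it is cleared again.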
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4c10d6c..8674f33 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -218,6 +218,68 @@ exit:
return status;
}
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
+{
+ struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_vp_config *config;
+ u64 val64;
+
+ if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
+ return;
+
+ vp_reg = fifo->vp_reg;
+ config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
+
+ if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+ config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+ val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ fifo->tim_tti_cfg1_saved = val64;
+ writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+ }
+}
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg1_saved;
+
+ val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+ ring->tim_rti_cfg1_saved = val64;
+ writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+}
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
+{
+ u64 val64 = fifo->tim_tti_cfg3_saved;
+ u64 timer = (fifo->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
+
+ writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+ /* tti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
+{
+ u64 val64 = ring->tim_rti_cfg3_saved;
+ u64 timer = (ring->rtimer * 1000) / 272;
+
+ val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+ if (timer)
+ val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+ VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
+
+ writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+ /* rti_cfg3_saved is not updated again because it is
+ * initialized at one place only - init time.
+ */
+}
+
/**
* vxge_hw_channel_msix_mask - Mask MSIX Vector.
* @channeh: Channel for rx or tx handle
@@ -254,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
}
/**
+ * vxge_hw_channel_msix_clear - Clear the MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id: MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ * if configured in MSIX one-shot mode
+ *
+ * Returns: None
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+ &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
+
+/**
* vxge_hw_device_set_intr_type - Updates the configuration
* with new interrupt type.
* @hldev: HW device handle.
@@ -2191,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
if (vpath->hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+ VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+ 0, 32), &vp_reg->one_shot_vect0_en);
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
0, 32), &vp_reg->one_shot_vect1_en);
- }
-
- if (vpath->hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
0, 32), &vp_reg->one_shot_vect2_en);
-
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
- VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
- 0, 32), &vp_reg->one_shot_vect3_en);
}
}
@@ -2229,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ *
+ * Returns: None
+ */
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+ struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+ if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+ else
+ __vxge_hw_pio_mem_write32_upper(
+ (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+ &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
+
+/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
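
The (rtimer * 1000) / 272 expression in the two dynamic setters converts the
microsecond-scale rtimer into TIM hardware ticks; the divisor implies a
roughly 272 ns tick, an inference from the code rather than from Titan
documentation. Worked values for the adaptive settings (illustrative
arithmetic only):

	u64 tti_ticks = (10ULL * 1000) / 272;	/* VXGE_TTI_RTIMER_ADAPT_VAL = 10 -> 36 ticks */
	u64 rti_ticks = (15ULL * 1000) / 272;	/* VXGE_RTI_RTIMER_ADAPT_VAL = 15 -> 55 ticks */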
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index d48486d..9d9dfda 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx(
* Virtual Paths
*/
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
+
u32 vxge_hw_vpath_id(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2245,6 +2249,8 @@ void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
+
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
@@ -2270,6 +2276,9 @@ void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
void
+vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
int
vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
#endif
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index ad2f99b..581e215 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "5"
-#define VXGE_VERSION_FIX "1"
-#define VXGE_VERSION_BUILD "22082"
+#define VXGE_VERSION_FIX "2"
+#define VXGE_VERSION_BUILD "22259"
#define VXGE_VERSION_FOR "k"
#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 32bf79e..a9111e1 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -1945,7 +1945,8 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
static int ar9170_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
switch (action) {
case IEEE80211_AMPDU_RX_START:
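
The new buf_size argument is part of this cycle's mac80211 ampdu_action
change: it carries the peer's negotiated block-ack window so that
aggregating drivers can cap A-MPDU length; ar9170 accepts it without using
it. A hypothetical consumer would look like this (fragment; sta_priv and
HW_MAX_AGG_SUBFRAMES are invented names):

	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* never build aggregates longer than the peer can reorder */
		sta_priv->max_subframes = min_t(u16, HW_MAX_AGG_SUBFRAMES,
						buf_size);
		break;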
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e43210c..a6c6a46 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -108,12 +108,14 @@ enum ath_cipher {
* struct ath_ops - Register read/write operations
*
* @read: Register read
+ * @multi_read: Multiple register read
* @write: Register write
* @enable_write_buffer: Enable multiple register writes
* @write_flush: flush buffered register writes and disable buffering
*/
struct ath_ops {
unsigned int (*read)(void *, u32 reg_offset);
+ void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
void (*write)(void *, u32 val, u32 reg_offset);
void (*enable_write_buffer)(void *);
void (*write_flush) (void *);
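
The multi_read hook batches several register reads behind the same
indirection the single-register read already uses. A call through it would
look like this (fragment; the offsets are placeholders and 'common' is a
struct ath_common pointer):

	u32 addr[2] = { 0x0000, 0x0004 };	/* placeholder register offsets */
	u32 val[2];

	common->ops->multi_read(common->priv, addr, val, 2);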
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index e079331..e18a9aa 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -40,6 +40,17 @@ config ATH5K_DEBUG
modprobe ath5k debug=0x00000400
+config ATH5K_TRACER
+ bool "Atheros 5xxx tracer"
+ depends on ATH5K
+ depends on EVENT_TRACING
+ ---help---
+ Say Y here to enable tracepoints for the ath5k driver
+ using the kernel tracing infrastructure. Select this
+ option if you are interested in debugging the driver.
+
+ If unsure, say N.
+
config ATH5K_AHB
bool "Atheros 5xxx AHB bus support"
depends on (ATHEROS_AR231X && !PCI)
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 707cde1..ae84b86 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -31,7 +31,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
*csz = L1_CACHE_BYTES >> 2;
}
-bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
+static bool
+ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath5k_softc *sc = common->priv;
struct platform_device *pdev = to_platform_device(sc->dev);
@@ -46,10 +47,10 @@ bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
eeprom += off;
if (eeprom > eeprom_end)
- return -EINVAL;
+ return false;
*data = *eeprom;
- return 0;
+ return true;
}
int ath5k_hw_read_srev(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 407e39c..e43175a 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -210,14 +210,9 @@
/* Initial values */
#define AR5K_INIT_CYCRSSI_THR1 2
-/* Tx retry limits */
-#define AR5K_INIT_SH_RETRY 10
-#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY
-/* For station mode */
-#define AR5K_INIT_SSH_RETRY 32
-#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
-#define AR5K_INIT_TX_RETRY 10
-
+/* Tx retry limit defaults from the 802.11 standard */
+#define AR5K_INIT_RETRY_SHORT 7
+#define AR5K_INIT_RETRY_LONG 4
/* Slot time */
#define AR5K_INIT_SLOT_TIME_TURBO 6
@@ -1057,7 +1052,9 @@ struct ath5k_hw {
#define ah_modes ah_capabilities.cap_mode
#define ah_ee_version ah_capabilities.cap_eeprom.ee_version
- u32 ah_limit_tx_retries;
+ u8 ah_retry_long;
+ u8 ah_retry_short;
+
u8 ah_coverage_class;
bool ah_ack_bitrate_high;
u8 ah_bwmode;
@@ -1067,7 +1064,6 @@ struct ath5k_hw {
u8 ah_ant_mode;
u8 ah_tx_ant;
u8 ah_def_ant;
- bool ah_software_retry;
struct ath5k_capabilities ah_capabilities;
@@ -1250,6 +1246,8 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info);
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+ unsigned int queue);
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index cdac5cf..c71fdbb 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -118,8 +118,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
ah->ah_imr = 0;
- ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
- ah->ah_software_retry = false;
+ ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
+ ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
ah->ah_noise_floor = -95; /* until first NF calibration is run */
sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 09ae4ef..dbc45e0 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -61,6 +61,9 @@
#include "debug.h"
#include "ani.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -242,73 +245,68 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
\********************/
/*
- * Convert IEEE channel number to MHz frequency.
- */
-static inline short
-ath5k_ieee2mhz(short chan)
-{
- if (chan <= 14 || chan >= 27)
- return ieee80211chan2mhz(chan);
- else
- return 2212 + chan * 20;
-}
-
-/*
* Returns true for the channel numbers used without all_channels modparam.
*/
-static bool ath5k_is_standard_channel(short chan)
+static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
- return ((chan <= 14) ||
- /* UNII 1,2 */
- ((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
+ if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+ return true;
+
+ return /* UNII 1,2 */
+ (((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
/* midband */
((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
/* UNII-3 */
- ((chan & 3) == 1 && chan >= 149 && chan <= 165));
+ ((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
+ /* 802.11j 5.030-5.080 GHz (20MHz) */
+ (chan == 8 || chan == 12 || chan == 16) ||
+ /* 802.11j 4.9GHz (20MHz) */
+ (chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
static unsigned int
-ath5k_copy_channels(struct ath5k_hw *ah,
- struct ieee80211_channel *channels,
- unsigned int mode,
- unsigned int max)
+ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
+ unsigned int mode, unsigned int max)
{
- unsigned int i, count, size, chfreq, freq, ch;
-
- if (!test_bit(mode, ah->ah_modes))
- return 0;
+ unsigned int count, size, chfreq, freq, ch;
+ enum ieee80211_band band;
switch (mode) {
case AR5K_MODE_11A:
/* 1..220, but 2GHz frequencies are filtered by check_channel */
- size = 220 ;
+ size = 220;
chfreq = CHANNEL_5GHZ;
+ band = IEEE80211_BAND_5GHZ;
break;
case AR5K_MODE_11B:
case AR5K_MODE_11G:
size = 26;
chfreq = CHANNEL_2GHZ;
+ band = IEEE80211_BAND_2GHZ;
break;
default:
ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
return 0;
}
- for (i = 0, count = 0; i < size && max > 0; i++) {
- ch = i + 1 ;
- freq = ath5k_ieee2mhz(ch);
+ count = 0;
+ for (ch = 1; ch <= size && count < max; ch++) {
+ freq = ieee80211_channel_to_frequency(ch, band);
+
+ if (freq == 0) /* mapping failed - not a standard channel */
+ continue;
/* Check if channel is supported by the chipset */
if (!ath5k_channel_ok(ah, freq, chfreq))
continue;
- if (!modparam_all_channels && !ath5k_is_standard_channel(ch))
+ if (!modparam_all_channels &&
+ !ath5k_is_standard_channel(ch, band))
continue;
/* Write channel info and increment counter */
channels[count].center_freq = freq;
- channels[count].band = (chfreq == CHANNEL_2GHZ) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ channels[count].band = band;
switch (mode) {
case AR5K_MODE_11A:
case AR5K_MODE_11G:
@@ -319,7 +317,6 @@ ath5k_copy_channels(struct ath5k_hw *ah,
}
count++;
- max--;
}
return count;
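The mac80211 helper performs the channel-to-frequency mapping that the removed ath5k_ieee2mhz() open-coded, and returns 0 for channel numbers with no defined mapping, which the loop above uses to skip them. For example:

	freq = ieee80211_channel_to_frequency(36, IEEE80211_BAND_5GHZ);	/* 5180 MHz */
	freq = ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ);	/* 2484 MHz */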
@@ -364,7 +361,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_bitrates = 12;
sband->channels = sc->channels;
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11G, max_c);
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -390,7 +387,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
}
sband->channels = sc->channels;
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11B, max_c);
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
@@ -410,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_bitrates = 8;
sband->channels = &sc->channels[count_c];
- sband->n_channels = ath5k_copy_channels(ah, sband->channels,
+ sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11A, max_c);
hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
@@ -445,18 +442,6 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
return ath5k_reset(sc, chan, true);
}
-static void
-ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
-{
- sc->curmode = mode;
-
- if (mode == AR5K_MODE_11A) {
- sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
- } else {
- sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
- }
-}
-
struct ath_vif_iter_data {
const u8 *hw_macaddr;
u8 mask[ETH_ALEN];
@@ -569,7 +554,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
"hw_rix out of bounds: %x\n", hw_rix))
return 0;
- rix = sc->rate_idx[sc->curband->band][hw_rix];
+ rix = sc->rate_idx[sc->curchan->band][hw_rix];
if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
rix = 0;
@@ -1379,7 +1364,7 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
rxs->flag |= RX_FLAG_TSFT;
rxs->freq = sc->curchan->center_freq;
- rxs->band = sc->curband->band;
+ rxs->band = sc->curchan->band;
rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi;
@@ -1394,10 +1379,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb,
rxs->flag |= ath5k_rx_decrypted(sc, skb, rs);
if (rxs->rate_idx >= 0 && rs->rs_rate ==
- sc->curband->bitrates[rxs->rate_idx].hw_value_short)
+ sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
rxs->flag |= RX_FLAG_SHORTPRE;
- ath5k_debug_dump_skb(sc, skb, "RX ", 0);
+ trace_ath5k_rx(sc, skb);
ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi);
@@ -1542,7 +1527,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
unsigned long flags;
int padsize;
- ath5k_debug_dump_skb(sc, skb, "TX ", 1);
+ trace_ath5k_tx(sc, skb, txq);
/*
* The hardware expects the header padded to 4 byte boundaries.
@@ -1591,7 +1576,7 @@ drop_packet:
static void
ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
- struct ath5k_tx_status *ts)
+ struct ath5k_txq *txq, struct ath5k_tx_status *ts)
{
struct ieee80211_tx_info *info;
int i;
@@ -1643,6 +1628,7 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb,
else
sc->stats.antenna_tx[0]++; /* invalid */
+ trace_ath5k_tx_complete(sc, skb, txq, ts);
ieee80211_tx_status(sc->hw, skb);
}
@@ -1679,7 +1665,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
DMA_TO_DEVICE);
- ath5k_tx_frame_completed(sc, skb, &ts);
+ ath5k_tx_frame_completed(sc, skb, txq, &ts);
}
/*
@@ -1821,8 +1807,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
goto out;
}
- ath5k_debug_dump_skb(sc, skb, "BC ", 1);
-
ath5k_txbuf_free_skb(sc, avf->bbuf);
avf->bbuf->skb = skb;
ret = ath5k_beacon_setup(sc, avf->bbuf);
@@ -1917,6 +1901,8 @@ ath5k_beacon_send(struct ath5k_softc *sc)
sc->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_beacon_update(sc->hw, vif);
+ trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]);
+
ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr);
ath5k_hw_start_tx_dma(ah, sc->bhalq);
ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
@@ -2417,7 +2403,8 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
/* set up multi-rate retry capabilities */
if (sc->ah->ah_version == AR5K_AR5212) {
hw->max_rates = 4;
- hw->max_rate_tries = 11;
+ hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
+ AR5K_INIT_RETRY_LONG);
}
hw->vif_data_size = sizeof(struct ath5k_vif);
@@ -2554,7 +2541,6 @@ ath5k_init_hw(struct ath5k_softc *sc)
* and then setup of the interrupt mask.
*/
sc->curchan = sc->hw->conf.channel;
- sc->curband = &sc->sbands[sc->curchan->band];
sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
@@ -2681,10 +2667,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
* so we should also free any remaining
* tx buffers */
ath5k_drain_tx_buffs(sc);
- if (chan) {
+ if (chan)
sc->curchan = chan;
- sc->curband = &sc->sbands[chan->band];
- }
ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
skip_pcu);
if (ret) {
@@ -2782,12 +2766,6 @@ ath5k_init(struct ieee80211_hw *hw)
goto err;
}
- /* NB: setup here so ath5k_rate_update is happy */
- if (test_bit(AR5K_MODE_11A, ah->ah_modes))
- ath5k_setcurmode(sc, AR5K_MODE_11A);
- else
- ath5k_setcurmode(sc, AR5K_MODE_11B);
-
/*
* Allocate tx+rx descriptors and populate the lists.
*/
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 6d51147..8f919dc 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -183,8 +183,6 @@ struct ath5k_softc {
enum nl80211_iftype opmode;
struct ath5k_hw *ah; /* Atheros HW */
- struct ieee80211_supported_band *curband;
-
#ifdef CONFIG_ATH5K_DEBUG
struct ath5k_dbg_info debug; /* debug info */
#endif /* CONFIG_ATH5K_DEBUG */
@@ -202,7 +200,6 @@ struct ath5k_softc {
#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
- unsigned int curmode; /* current phy mode */
struct ieee80211_channel *curchan; /* current h/w channel */
u16 nvifs;
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 31cad80e..f77e8a7 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -32,23 +32,24 @@
*/
int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
{
+ struct ath5k_capabilities *caps = &ah->ah_capabilities;
u16 ee_header;
/* Capabilities stored in the EEPROM */
- ee_header = ah->ah_capabilities.cap_eeprom.ee_header;
+ ee_header = caps->cap_eeprom.ee_header;
if (ah->ah_version == AR5K_AR5210) {
/*
* Set radio capabilities
* (The AR5110 only supports the middle 5GHz band)
*/
- ah->ah_capabilities.cap_range.range_5ghz_min = 5120;
- ah->ah_capabilities.cap_range.range_5ghz_max = 5430;
- ah->ah_capabilities.cap_range.range_2ghz_min = 0;
- ah->ah_capabilities.cap_range.range_2ghz_max = 0;
+ caps->cap_range.range_5ghz_min = 5120;
+ caps->cap_range.range_5ghz_max = 5430;
+ caps->cap_range.range_2ghz_min = 0;
+ caps->cap_range.range_2ghz_max = 0;
/* Set supported modes */
- __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11A, caps->cap_mode);
} else {
/*
* XXX The transceiver supports frequencies from 4920 to 6100MHz
@@ -56,9 +57,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
* XXX current ieee80211 implementation because the IEEE
* XXX channel mapping does not support negative channel
* XXX numbers (2312MHz is channel -19). Of course, this
- * XXX doesn't matter because these channels are out of range
- * XXX but some regulation domains like MKK (Japan) will
- * XXX support frequencies somewhere around 4.8GHz.
+ * XXX doesn't matter because these channels are out of the
+ * XXX legal range.
*/
/*
@@ -66,13 +66,14 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
*/
if (AR5K_EEPROM_HDR_11A(ee_header)) {
- /* 4920 */
- ah->ah_capabilities.cap_range.range_5ghz_min = 5005;
- ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
+ if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
+ caps->cap_range.range_5ghz_min = 4920;
+ else
+ caps->cap_range.range_5ghz_min = 5005;
+ caps->cap_range.range_5ghz_max = 6100;
/* Set supported modes */
- __set_bit(AR5K_MODE_11A,
- ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11A, caps->cap_mode);
}
/* Enable 802.11b if a 2GHz capable radio (2111/5112) is
@@ -81,32 +82,29 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
(AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)) {
/* 2312 */
- ah->ah_capabilities.cap_range.range_2ghz_min = 2412;
- ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
+ caps->cap_range.range_2ghz_min = 2412;
+ caps->cap_range.range_2ghz_max = 2732;
if (AR5K_EEPROM_HDR_11B(ee_header))
- __set_bit(AR5K_MODE_11B,
- ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11B, caps->cap_mode);
if (AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)
- __set_bit(AR5K_MODE_11G,
- ah->ah_capabilities.cap_mode);
+ __set_bit(AR5K_MODE_11G, caps->cap_mode);
}
}
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
- ah->ah_capabilities.cap_queues.q_tx_num =
- AR5K_NUM_TX_QUEUES_NOQCU;
+ caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
else
- ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
+ caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
/* newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
- ah->ah_capabilities.cap_has_phyerr_counters = true;
+ caps->cap_has_phyerr_counters = true;
else
- ah->ah_capabilities.cap_has_phyerr_counters = false;
+ caps->cap_has_phyerr_counters = false;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index d2f84d7..0230f30 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -308,8 +308,6 @@ static const struct {
{ ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
{ ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
{ ATH5K_DEBUG_LED, "led", "LED management" },
- { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
- { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
{ ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
{ ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
@@ -1036,24 +1034,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
}
void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
- struct sk_buff *skb, const char *prefix, int tx)
-{
- char buf[16];
-
- if (likely(!((tx && (sc->debug.level & ATH5K_DEBUG_DUMP_TX)) ||
- (!tx && (sc->debug.level & ATH5K_DEBUG_DUMP_RX)))))
- return;
-
- snprintf(buf, sizeof(buf), "%s %s", wiphy_name(sc->hw->wiphy), prefix);
-
- print_hex_dump_bytes(buf, DUMP_PREFIX_NONE, skb->data,
- min(200U, skb->len));
-
- printk(KERN_DEBUG "\n");
-}
-
-void
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
struct ath5k_desc *ds = bf->desc;
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 3e34428..b0355ae 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -116,8 +116,6 @@ enum ath5k_debug_level {
ATH5K_DEBUG_CALIBRATE = 0x00000020,
ATH5K_DEBUG_TXPOWER = 0x00000040,
ATH5K_DEBUG_LED = 0x00000080,
- ATH5K_DEBUG_DUMP_RX = 0x00000100,
- ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
ATH5K_DEBUG_DMA = 0x00000800,
ATH5K_DEBUG_ANI = 0x00002000,
@@ -152,10 +150,6 @@ void
ath5k_debug_dump_bands(struct ath5k_softc *sc);
void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
- struct sk_buff *skb, const char *prefix, int tx);
-
-void
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
#else /* no debugging */
@@ -182,10 +176,6 @@ static inline void
ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
static inline void
-ath5k_debug_dump_skb(struct ath5k_softc *sc,
- struct sk_buff *skb, const char *prefix, int tx) {}
-
-static inline void
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
#endif /* ifdef CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 80e62560..b6561f7 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -72,7 +72,6 @@ static int
ath5k_eeprom_init_header(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
- int ret;
u16 val;
u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
@@ -192,7 +191,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 o = *offset;
u16 val;
- int ret, i = 0;
+ int i = 0;
AR5K_EEPROM_READ(o++, val);
ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
@@ -252,7 +251,6 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 o = *offset;
u16 val;
- int ret;
ee->ee_n_piers[mode] = 0;
AR5K_EEPROM_READ(o++, val);
@@ -515,7 +513,6 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
int o = *offset;
int i = 0;
u8 freq1, freq2;
- int ret;
u16 val;
ee->ee_n_piers[mode] = 0;
@@ -551,7 +548,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
- int i, ret;
+ int i;
u16 val;
u8 mask;
@@ -970,7 +967,6 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
u32 offset;
u8 i, c;
u16 val;
- int ret;
u8 pd_gains = 0;
/* Count how many curves we have and
@@ -1228,7 +1224,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
struct ath5k_chan_pcal_info *chinfo;
u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
u32 offset;
- int idx, i, ret;
+ int idx, i;
u16 val;
u8 pd_gains = 0;
@@ -1419,7 +1415,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
u8 *rate_target_pwr_num;
u32 offset;
u16 val;
- int ret, i;
+ int i;
offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
@@ -1593,7 +1589,7 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
struct ath5k_edge_power *rep;
unsigned int fmask, pmask;
unsigned int ctl_mode;
- int ret, i, j;
+ int i, j;
u32 offset;
u16 val;
@@ -1733,16 +1729,12 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
u8 mac_d[ETH_ALEN] = {};
u32 total, offset;
u16 data;
- int octet, ret;
+ int octet;
- ret = ath5k_hw_nvram_read(ah, 0x20, &data);
- if (ret)
- return ret;
+ AR5K_EEPROM_READ(0x20, data);
for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
- ret = ath5k_hw_nvram_read(ah, offset, &data);
- if (ret)
- return ret;
+ AR5K_EEPROM_READ(offset, data);
total += data;
mac_d[octet + 1] = data & 0xff;
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 7c09e15..6511c27 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -241,9 +241,8 @@ enum ath5k_eeprom_freq_bands{
#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250
#define AR5K_EEPROM_READ(_o, _v) do { \
- ret = ath5k_hw_nvram_read(ah, (_o), &(_v)); \
- if (ret) \
- return ret; \
+ if (!ath5k_hw_nvram_read(ah, (_o), &(_v))) \
+ return -EIO; \
} while (0)
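A sketch of a caller after the rework (the function below is illustrative); because the macro now returns -EIO from the enclosing function itself, the previously required ret locals could be dropped throughout eeprom.c above:

	static int example_read_word(struct ath5k_hw *ah, u16 *out)
	{
		u16 val;

		AR5K_EEPROM_READ(0x20, val);	/* returns -EIO on NVRAM read failure */
		*out = val;
		return 0;
	}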
#define AR5K_EEPROM_READ_HDR(_o, _v) \
@@ -269,29 +268,6 @@ enum ath5k_ctl_mode {
AR5K_CTL_MODE_M = 15,
};
-/* Default CTL ids for the 3 main reg domains.
- * Atheros only uses these by default but vendors
- * can have up to 32 different CTLs for different
- * scenarios. Note that theese values are ORed with
- * the mode id (above) so we can have up to 24 CTL
- * datasets out of these 3 main regdomains. That leaves
- * 8 ids that can be used by vendors and since 0x20 is
- * missing from HAL sources i guess this is the set of
- * custom CTLs vendors can use. */
-#define AR5K_CTL_FCC 0x10
-#define AR5K_CTL_CUSTOM 0x20
-#define AR5K_CTL_ETSI 0x30
-#define AR5K_CTL_MKK 0x40
-
-/* Indicates a CTL with only mode set and
- * no reg domain mapping, such CTLs are used
- * for world roaming domains or simply when
- * a reg domain is not set */
-#define AR5K_CTL_NO_REGDOMAIN 0xf0
-
-/* Indicates an empty (invalid) CTL */
-#define AR5K_CTL_NO_CTL 0xff
-
/* Per channel calibration data, used for power table setup */
struct ath5k_chan_pcal_info_rf5111 {
/* Power levels in half dbm units
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index d76d68c..36a5199 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -226,6 +226,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
struct ath5k_hw *ah = sc->ah;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
+ int i;
mutex_lock(&sc->lock);
@@ -243,6 +244,14 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
}
+ if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
+ ah->ah_retry_long = conf->long_frame_max_tx_count;
+ ah->ah_retry_short = conf->short_frame_max_tx_count;
+
+ for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
+ ath5k_hw_set_tx_retry_limits(ah, i);
+ }
+
/* TODO:
* 1) Move this on config_interface and handle each case
* separately eg. when we have only one STA vif, use
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 7f8c5b0..66598a0 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -69,7 +69,8 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
/*
* Read from eeprom
*/
-bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
+static bool
+ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
u32 status, timeout;
@@ -90,15 +91,15 @@ bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
if (status & AR5K_EEPROM_STAT_RDDONE) {
if (status & AR5K_EEPROM_STAT_RDERR)
- return -EIO;
+ return false;
*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
0xffff);
- return 0;
+ return true;
}
udelay(15);
}
- return -ETIMEDOUT;
+ return false;
}
int ath5k_hw_read_srev(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 78c26fd..d673ab2 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ /*
+ * On 5211+ read activation -> rx delay
+ * and use it (100ns steps).
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ u32 delay;
+ delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+ AR5K_PHY_RX_DELAY_M;
+ delay = (channel->hw_value & CHANNEL_CCK) ?
+ ((delay << 2) / 22) : (delay / 10);
+ if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+ delay = delay << 1;
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+ delay = delay << 2;
+ /* XXX: /2 on turbo? Let's be safe
+ * for now */
+ udelay(100 + delay);
+ } else {
+ mdelay(1);
+ }
+}
+
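To make the settle delay concrete, suppose AR5K_PHY_RX_DELAY reads back 128 (in 100 ns steps; the value is purely illustrative):

	OFDM channel: delay = 128 / 10        = 12  ->  udelay(100 + 12)
	CCK channel:  delay = (128 << 2) / 22 = 23  ->  udelay(100 + 23)

The scaled value is additionally doubled in 10 MHz mode and quadrupled in 5 MHz mode before the udelay().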
/**********************\
* RF Gain optimization *
@@ -3237,6 +3265,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* Failed */
if (i >= 100)
return -EIO;
+
+ /* Set channel and wait for synth */
+ ret = ath5k_hw_channel(ah, channel);
+ if (ret)
+ return ret;
+
+ ath5k_hw_wait_for_synth(ah, channel);
}
/*
@@ -3251,13 +3286,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
+ /* Write OFDM timings on 5212*/
+ if (ah->ah_version == AR5K_AR5212 &&
+ channel->hw_value & CHANNEL_OFDM) {
+
+ ret = ath5k_hw_write_ofdm_timings(ah, channel);
+ if (ret)
+ return ret;
+
+ /* Spur info is available only from EEPROM versions
+ * greater than 5.3, but the EEPROM routines will use
+ * static values for older versions */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+ ath5k_hw_set_spur_mitigation_filter(ah,
+ channel);
+ }
+
+ /* If we used fast channel switching
+ * we are done, release RF bus and
+ * fire up NF calibration.
+ *
+ * Note: Only NF calibration due to
+ * channel change, not AGC calibration
+ * since AGC is still running !
+ */
+ if (fast) {
+ /*
+ * Release RF Bus grant
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+ AR5K_PHY_RFBUS_REQ_REQUEST);
+
+ /*
+ * Start NF calibration
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_NF);
+
+ return ret;
+ }
+
/*
* For 5210 we do all initialization using
* initvals, so we don't have to modify
* any settings (5210 also only supports
* a/aturbo modes)
*/
- if ((ah->ah_version != AR5K_AR5210) && !fast) {
+ if (ah->ah_version != AR5K_AR5210) {
/*
* Write initial RF gain settings
@@ -3276,22 +3351,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
- /* Write OFDM timings on 5212*/
- if (ah->ah_version == AR5K_AR5212 &&
- channel->hw_value & CHANNEL_OFDM) {
-
- ret = ath5k_hw_write_ofdm_timings(ah, channel);
- if (ret)
- return ret;
-
- /* Spur info is available only from EEPROM versions
- * greater than 5.3, but the EEPROM routines will use
- * static values for older versions */
- if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
- ath5k_hw_set_spur_mitigation_filter(ah,
- channel);
- }
-
/*Enable/disable 802.11b mode on 5111
(enable 2111 frequency converter + CCK)*/
if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3381,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
+ ath5k_hw_wait_for_synth(ah, channel);
+
/*
- * On 5211+ read activation -> rx delay
- * and use it.
+ * Perform ADC test to see if baseband is ready
+ * Set tx hold and check adc test register
*/
- if (ah->ah_version != AR5K_AR5210) {
- u32 delay;
- delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
- AR5K_PHY_RX_DELAY_M;
- delay = (channel->hw_value & CHANNEL_CCK) ?
- ((delay << 2) / 22) : (delay / 10);
- if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
- delay = delay << 1;
- if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
- delay = delay << 2;
- /* XXX: /2 on turbo ? Let's be safe
- * for now */
- udelay(100 + delay);
- } else {
- mdelay(1);
- }
-
- if (fast)
- /*
- * Release RF Bus grant
- */
- AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
- AR5K_PHY_RFBUS_REQ_REQUEST);
- else {
- /*
- * Perform ADC test to see if baseband is ready
- * Set tx hold and check adc test register
- */
- phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
- ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
- for (i = 0; i <= 20; i++) {
- if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
- break;
- udelay(200);
- }
- ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+ phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+ ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+ for (i = 0; i <= 20; i++) {
+ if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+ break;
+ udelay(200);
}
+ ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
/*
* Start automatic gain control calibration
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 2c9c9e7..3343fb9 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -228,24 +228,9 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
/*
* Set tx retry limits on DCU
*/
-static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
- unsigned int queue)
+void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+ unsigned int queue)
{
- u32 retry_lg, retry_sh;
-
- /*
- * Calculate and set retry limits
- */
- if (ah->ah_software_retry) {
- /* XXX Need to test this */
- retry_lg = ah->ah_limit_tx_retries;
- retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
- AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
- } else {
- retry_lg = AR5K_INIT_LG_RETRY;
- retry_sh = AR5K_INIT_SH_RETRY;
- }
-
/* Single data queue on AR5210 */
if (ah->ah_version == AR5K_AR5210) {
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -255,25 +240,26 @@ static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
ath5k_hw_reg_write(ah,
(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
- | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
- AR5K_NODCU_RETRY_LMT_SLG_RETRY)
- | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
- AR5K_NODCU_RETRY_LMT_SSH_RETRY)
- | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
- | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_SLG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SSH_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_NODCU_RETRY_LMT_LG_RETRY)
+ | AR5K_REG_SM(ah->ah_retry_short,
+ AR5K_NODCU_RETRY_LMT_SH_RETRY),
AR5K_NODCU_RETRY_LMT);
/* DCU on AR5211+ */
} else {
ath5k_hw_reg_write(ah,
- AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
- AR5K_DCU_RETRY_LMT_SLG_RETRY) |
- AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
- AR5K_DCU_RETRY_LMT_SSH_RETRY) |
- AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
- AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
+ AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_RTS)
+ | AR5K_REG_SM(ah->ah_retry_long,
+ AR5K_DCU_RETRY_LMT_STA_RTS)
+ | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
+ AR5K_DCU_RETRY_LMT_STA_DATA),
AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
}
- return;
}
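In ath5k's register helpers, AR5K_REG_SM(_val, _flags) expands to (((_val) << _flags##_S) & (_flags)). With the new defaults (ah_retry_long = 4, ah_retry_short = 7), the AR5211+ branch above therefore packs:

	AR5K_REG_SM(4, AR5K_DCU_RETRY_LMT_RTS)      = (4 << 0)  & 0x0000000f = 0x00000004
	AR5K_REG_SM(4, AR5K_DCU_RETRY_LMT_STA_RTS)  = (4 << 8)  & 0x00003f00 = 0x00000400
	AR5K_REG_SM(7, AR5K_DCU_RETRY_LMT_STA_DATA) = (7 << 14) & 0x000fc000 = 0x0001c000

i.e. the queue's retry-limit register is written as 0x0001c404.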
/**
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index fd14b91..e1c9abd 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -686,16 +686,15 @@
/*
* DCU retry limit registers
+ * none of these fields allows a zero value
*/
#define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */
-#define AR5K_DCU_RETRY_LMT_SH_RETRY_S 0
-#define AR5K_DCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry limit mask */
-#define AR5K_DCU_RETRY_LMT_LG_RETRY_S 4
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY 0x00003f00 /* Station short retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SSH_RETRY_S 8
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY 0x000fc000 /* Station long retry limit mask (?) */
-#define AR5K_DCU_RETRY_LMT_SLG_RETRY_S 14
+#define AR5K_DCU_RETRY_LMT_RTS 0x0000000f /* RTS failure limit. Transmission fails if no CTS is received after this many attempts */
+#define AR5K_DCU_RETRY_LMT_RTS_S 0
+#define AR5K_DCU_RETRY_LMT_STA_RTS 0x00003f00 /* STA RTS failure limit. If exceeded, the CW is reset */
+#define AR5K_DCU_RETRY_LMT_STA_RTS_S 8
+#define AR5K_DCU_RETRY_LMT_STA_DATA 0x000fc000 /* STA data failure limit. If exceeded, the CW is reset. */
+#define AR5K_DCU_RETRY_LMT_STA_DATA_S 14
#define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q)
/*
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
new file mode 100644
index 0000000..2de68ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -0,0 +1,107 @@
+#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_ATH5K_H
+
+#include <linux/tracepoint.h>
+#include "base.h"
+
+#ifndef CONFIG_ATH5K_TRACER
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+struct sk_buff;
+
+#define PRIV_ENTRY __field(struct ath5k_softc *, priv)
+#define PRIV_ASSIGN __entry->priv = priv
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath5k
+
+TRACE_EVENT(ath5k_rx,
+ TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
+ TP_ARGS(priv, skb),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(unsigned long, skbaddr)
+ __dynamic_array(u8, frame, skb->len)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->skbaddr = (unsigned long) skb;
+ memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+ ),
+ TP_printk(
+ "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr
+ )
+);
+
+TRACE_EVENT(ath5k_tx,
+ TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+ struct ath5k_txq *q),
+
+ TP_ARGS(priv, skb, q),
+
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(unsigned long, skbaddr)
+ __field(u8, qnum)
+ __dynamic_array(u8, frame, skb->len)
+ ),
+
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->skbaddr = (unsigned long) skb;
+ __entry->qnum = (u8) q->qnum;
+ memcpy(__get_dynamic_array(frame), skb->data, skb->len);
+ ),
+
+ TP_printk(
+ "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr,
+ __entry->qnum
+ )
+);
+
+TRACE_EVENT(ath5k_tx_complete,
+ TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
+ struct ath5k_txq *q, struct ath5k_tx_status *ts),
+
+ TP_ARGS(priv, skb, q, ts),
+
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(unsigned long, skbaddr)
+ __field(u8, qnum)
+ __field(u8, ts_status)
+ __field(s8, ts_rssi)
+ __field(u8, ts_antenna)
+ ),
+
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->skbaddr = (unsigned long) skb;
+ __entry->qnum = (u8) q->qnum;
+ __entry->ts_status = ts->ts_status;
+ __entry->ts_rssi = ts->ts_rssi;
+ __entry->ts_antenna = ts->ts_antenna;
+ ),
+
+ TP_printk(
+ "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x",
+ __entry->priv, __entry->skbaddr, __entry->qnum,
+ __entry->ts_status, __entry->ts_rssi, __entry->ts_antenna
+ )
+);
+
+#endif /* __TRACE_ATH5K_H */
+
+#ifdef CONFIG_ATH5K_TRACER
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
+
+#endif
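When CONFIG_ATH5K_TRACER is not set, the TRACE_EVENT override near the top of this header turns each event into an empty inline stub, so the trace_*() call sites in base.c compile to nothing. The ath5k_rx event, for instance, expands to roughly:

	static inline void trace_ath5k_rx(struct ath5k_softc *priv,
					  struct sk_buff *skb) {}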
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index aca0162..4d66ca8 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,7 +4,6 @@ ath9k-y += beacon.o \
main.o \
recv.o \
xmit.o \
- virtual.o \
ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_PCI) += pci.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 25a6e44..9936721 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -54,7 +54,6 @@ static struct ath_bus_ops ath_ahb_bus_ops = {
static int ath_ahb_probe(struct platform_device *pdev)
{
void __iomem *mem;
- struct ath_wiphy *aphy;
struct ath_softc *sc;
struct ieee80211_hw *hw;
struct resource *res;
@@ -92,8 +91,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
irq = res->start;
- hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
- sizeof(struct ath_softc), &ath9k_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
ret = -ENOMEM;
@@ -103,11 +101,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
SET_IEEE80211_DEV(hw, &pdev->dev);
platform_set_drvdata(pdev, hw);
- aphy = hw->priv;
- sc = (struct ath_softc *) (aphy + 1);
- aphy->sc = sc;
- aphy->hw = hw;
- sc->pri_wiphy = aphy;
+ sc = hw->priv;
sc->hw = hw;
sc->dev = &pdev->dev;
sc->mem = mem;
@@ -151,8 +145,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
if (hw) {
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
void __iomem *mem = sc->mem;
ath9k_deinit_device(sc);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 5e300bd..76388c6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -805,7 +805,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
- if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
+ if (AR_SREV_9271(ah)) {
+ if (!ar9285_hw_cl_cal(ah, chan))
+ return false;
+ } else if (AR_SREV_9285_12_OR_LATER(ah)) {
if (!ar9285_hw_clc(ah, chan))
return false;
} else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 4819747..4a92718 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3673,7 +3673,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
return;
reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) |
- (7 << 14) | (6 << 17) | (1 << 20) |
+ (2 << 14) | (6 << 17) | (1 << 20) |
(3 << 24) | (1 << 28);
REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
@@ -3959,19 +3959,19 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
/* make sure forced gain is not set */
- REG_WRITE(ah, 0xa458, 0);
+ REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0);
/* Write the OFDM power per rate set */
/* 6 (LSB), 9, 12, 18 (MSB) */
- REG_WRITE(ah, 0xa3c0,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
/* 24 (LSB), 36, 48, 54 (MSB) */
- REG_WRITE(ah, 0xa3c4,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
@@ -3980,14 +3980,14 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Write the CCK power per rate set */
/* 1L (LSB), reserved, 2L, 2S (MSB) */
- REG_WRITE(ah, 0xa3c8,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
/* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
/* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
- REG_WRITE(ah, 0xa3cc,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3),
POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
@@ -3997,7 +3997,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Write the HT20 power per rate set */
/* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
- REG_WRITE(ah, 0xa3d0,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4),
POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
@@ -4005,7 +4005,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 6 (LSB), 7, 12, 13 (MSB) */
- REG_WRITE(ah, 0xa3d4,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5),
POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
@@ -4013,7 +4013,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 14 (LSB), 15, 20, 21 */
- REG_WRITE(ah, 0xa3e4,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9),
POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
@@ -4023,7 +4023,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
/* Mixed HT20 and HT40 rates */
/* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
- REG_WRITE(ah, 0xa3e8,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10),
POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
@@ -4035,7 +4035,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
* correct PAR difference between HT40 and HT20/LEGACY
* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
*/
- REG_WRITE(ah, 0xa3d8,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6),
POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
@@ -4043,7 +4043,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 6 (LSB), 7, 12, 13 (MSB) */
- REG_WRITE(ah, 0xa3dc,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7),
POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
@@ -4051,7 +4051,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
);
/* 14 (LSB), 15, 20, 21 */
- REG_WRITE(ah, 0xa3ec,
+ REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11),
POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 4ceddbb..038a0cb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
*/
if (rxsp->status11 & AR_CRCErr)
rxs->rs_status |= ATH9K_RXERR_CRC;
- if (rxsp->status11 & AR_PHYErr) {
+ else if (rxsp->status11 & AR_PHYErr) {
phyerr = MS(rxsp->status11, AR_PHYErrCode);
/*
* If we reach a point here where AR_PostDelimCRCErr is
@@ -638,11 +638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_phyerr = phyerr;
}
- }
- if (rxsp->status11 & AR_DecryptCRCErr)
+ } else if (rxsp->status11 & AR_DecryptCRCErr)
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
- if (rxsp->status11 & AR_MichaelErr)
+ else if (rxsp->status11 & AR_MichaelErr)
rxs->rs_status |= ATH9K_RXERR_MIC;
+
if (rxsp->status11 & AR_KeyMiss)
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 59bab6b..8bdda2c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -486,6 +486,8 @@
#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
+#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
+
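The new macro reproduces the magic offsets it replaces; the old literals imply AR_SM_BASE is 0xa200 on this family, so for instance:

	AR_PHY_POWER_TX_RATE(0)  = AR_SM_BASE + 0x1c0 + (0 << 2)  = 0xa3c0
	AR_PHY_POWER_TX_RATE(4)  = AR_SM_BASE + 0x1c0 + (4 << 2)  = 0xa3d0
	AR_PHY_POWER_TX_RATE(11) = AR_SM_BASE + 0x1c0 + (11 << 2) = 0xa3ec

matching the addresses previously hard-coded in ar9003_hw_tx_power_regwrite().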
#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 23838e3..9272278 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -95,9 +95,9 @@ struct ath_config {
* @BUF_XRETRY: To denote excessive retries of the buffer
*/
enum buffer_type {
- BUF_AMPDU = BIT(2),
- BUF_AGGR = BIT(3),
- BUF_XRETRY = BIT(5),
+ BUF_AMPDU = BIT(0),
+ BUF_AGGR = BIT(1),
+ BUF_XRETRY = BIT(2),
};
#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
@@ -137,7 +137,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
WME_AC_VO)
-#define ADDBA_EXCHANGE_ATTEMPTS 10
#define ATH_AGGR_DELIM_SZ 4
#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
/* number of delimiters for encryption padding */
@@ -184,7 +183,8 @@ enum ATH_AGGR_STATUS {
#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
- u32 axq_qnum;
+ int mac80211_qnum; /* mac80211 queue number; -1 if not a mac80211 queue */
+ u32 axq_qnum; /* ath9k hardware queue number */
u32 *axq_link;
struct list_head axq_q;
spinlock_t axq_lock;
@@ -234,7 +234,6 @@ struct ath_buf {
bool bf_stale;
u16 bf_flags;
struct ath_buf_state bf_state;
- struct ath_wiphy *aphy;
};
struct ath_atx_tid {
@@ -255,7 +254,10 @@ struct ath_atx_tid {
};
struct ath_node {
- struct ath_common *common;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ struct list_head list; /* for sc->nodes */
+ struct ieee80211_sta *sta; /* station struct we're part of */
+#endif
struct ath_atx_tid tid[WME_NUM_TID];
struct ath_atx_ac ac[WME_NUM_AC];
u16 maxampdu;
@@ -278,6 +280,11 @@ struct ath_tx_control {
#define ATH_TX_XRETRY 0x02
#define ATH_TX_BAR 0x04
+/**
+ * @txq_map: Index is mac80211 queue number. This is
+ * not necessarily the same as the hardware queue number
+ * (axq_qnum).
+ */
struct ath_tx {
u16 seq_no;
u32 txqsetup;
@@ -304,6 +311,8 @@ struct ath_rx {
struct ath_descdma rxdma;
struct ath_buf *rx_bufptr;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+
+ struct sk_buff *frag;
};
int ath_startrecv(struct ath_softc *sc);
@@ -343,7 +352,6 @@ struct ath_vif {
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
enum nl80211_iftype av_opmode;
struct ath_buf *av_bcbuf;
- struct ath_tx_control av_btxctl;
u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */
};
@@ -382,7 +390,6 @@ struct ath_beacon {
u32 ast_be_xmit;
u64 bc_tstamp;
struct ieee80211_vif *bslot[ATH_BCBUF];
- struct ath_wiphy *bslot_aphy[ATH_BCBUF];
int slottime;
int slotupdate;
struct ath9k_tx_queue_info beacon_qi;
@@ -393,7 +400,7 @@ struct ath_beacon {
void ath_beacon_tasklet(unsigned long data);
void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif);
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
int ath_beaconq_config(struct ath_softc *sc);
@@ -530,7 +537,6 @@ struct ath_ant_comb {
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
#define ATH_MAX_SW_RETRIES 10
#define ATH_CHAN_MAX 255
-#define IEEE80211_WEP_NKID 4 /* number of key ids */
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
@@ -558,27 +564,28 @@ struct ath_ant_comb {
#define PS_WAIT_FOR_TX_ACK BIT(3)
#define PS_BEACON_SYNC BIT(4)
-struct ath_wiphy;
struct ath_rate_table;
+struct ath9k_vif_iter_data {
+ const u8 *hw_macaddr; /* phy's hardware address, set
+ * before starting iteration for
+ * valid bssid mask.
+ */
+ u8 mask[ETH_ALEN]; /* bssid mask */
+ int naps; /* number of AP vifs */
+ int nmeshes; /* number of mesh vifs */
+ int nstations; /* number of station vifs */
+ int nwds; /* number of WDS vifs */
+ int nadhocs; /* number of adhoc vifs */
+ int nothers; /* number of vifs not specified above. */
+};
+
struct ath_softc {
struct ieee80211_hw *hw;
struct device *dev;
- spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
- struct ath_wiphy *pri_wiphy;
- struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
- * have NULL entries */
- int num_sec_wiphy; /* number of sec_wiphy pointers in the array */
int chan_idx;
int chan_is_ht;
- struct ath_wiphy *next_wiphy;
- struct work_struct chan_work;
- int wiphy_select_failures;
- unsigned long wiphy_select_first_fail;
- struct delayed_work wiphy_work;
- unsigned long wiphy_scheduler_int;
- int wiphy_scheduler_index;
struct survey_info *cur_survey;
struct survey_info survey[ATH9K_NUM_CHANNELS];
@@ -595,14 +602,16 @@ struct ath_softc {
struct work_struct hw_check_work;
struct completion paprd_complete;
+ unsigned int hw_busy_count;
+
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
u16 ps_flags; /* PS_* */
u16 curtxpow;
- u8 nbcnvifs;
- u16 nvifs;
bool ps_enabled;
bool ps_idle;
+ short nbcnvifs;
+ short nvifs;
unsigned long ps_usecount;
struct ath_config config;
@@ -621,13 +630,20 @@ struct ath_softc {
int led_on_cnt;
int led_off_cnt;
+ struct ath9k_hw_cal_data caldata;
+ int last_rssi;
+
int beacon_interval;
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
+ spinlock_t nodes_lock;
+ struct list_head nodes; /* basically, stations */
+ unsigned int tx_complete_poll_work_seen;
#endif
struct ath_beacon_config cur_beacon_conf;
struct delayed_work tx_complete_work;
+ struct delayed_work hw_pll_work;
struct ath_btcoex btcoex;
struct ath_descdma txsdma;
@@ -637,23 +653,6 @@ struct ath_softc {
struct pm_qos_request_list pm_qos_req;
};
-struct ath_wiphy {
- struct ath_softc *sc; /* shared for all virtual wiphys */
- struct ieee80211_hw *hw;
- struct ath9k_hw_cal_data caldata;
- enum ath_wiphy_state {
- ATH_WIPHY_INACTIVE,
- ATH_WIPHY_ACTIVE,
- ATH_WIPHY_PAUSING,
- ATH_WIPHY_PAUSED,
- ATH_WIPHY_SCAN,
- } state;
- bool idle;
- int chan_idx;
- int chan_is_ht;
- int last_rssi;
-};
-
void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
int ath_cabq_update(struct ath_softc *);
@@ -675,14 +674,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *ichan);
int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *hchan);
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
+bool ath9k_uses_beacons(int type);
#ifdef CONFIG_PCI
int ath_pci_init(void);
@@ -706,26 +704,12 @@ void ath9k_ps_restore(struct ath_softc *sc);
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-int ath9k_wiphy_add(struct ath_softc *sc);
-int ath9k_wiphy_del(struct ath_wiphy *aphy);
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype);
-int ath9k_wiphy_pause(struct ath_wiphy *aphy);
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-int ath9k_wiphy_select(struct ath_wiphy *aphy);
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int);
-void ath9k_wiphy_chan_work(struct work_struct *work);
-bool ath9k_wiphy_started(struct ath_softc *sc);
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
- struct ath_wiphy *selected);
-bool ath9k_wiphy_scanning(struct ath_softc *sc);
-void ath9k_wiphy_work(struct work_struct *work);
-bool ath9k_all_wiphys_idle(struct ath_softc *sc);
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
-
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
void ath_start_rfkill_poll(struct ath_softc *sc);
extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ath9k_vif_iter_data *iter_data);
+
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 385ba03..fcb36ab 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -112,8 +112,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_tx_control txctl;
@@ -132,8 +131,7 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_buf *bf;
struct ath_vif *avp;
@@ -142,9 +140,6 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
int cabq_depth;
- if (aphy->state != ATH_WIPHY_ACTIVE)
- return NULL;
-
avp = (void *)vif->drv_priv;
cabq = sc->beacon.cabq;
@@ -225,9 +220,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
return bf;
}
-int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
+int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp;
struct ath_buf *bf;
@@ -244,9 +238,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
struct ath_buf, list);
list_del(&avp->av_bcbuf->list);
- if (sc->sc_ah->opmode == NL80211_IFTYPE_AP ||
- sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC ||
- sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) {
+ if (ath9k_uses_beacons(vif->type)) {
int slot;
/*
* Assign the vif to a beacon xmit slot. As
@@ -263,7 +255,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
}
BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
sc->beacon.bslot[avp->av_bslot] = vif;
- sc->beacon.bslot_aphy[avp->av_bslot] = aphy;
sc->nbcnvifs++;
}
}
@@ -281,10 +272,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
/* NB: the beacon data buffer must be 32-bit aligned. */
skb = ieee80211_beacon_get(sc->hw, vif);
- if (skb == NULL) {
- ath_dbg(common, ATH_DBG_BEACON, "cannot get skb\n");
+ if (skb == NULL)
return -ENOMEM;
- }
tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
@@ -336,7 +325,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
if (avp->av_bslot != -1) {
sc->beacon.bslot[avp->av_bslot] = NULL;
- sc->beacon.bslot_aphy[avp->av_bslot] = NULL;
sc->nbcnvifs--;
}
@@ -362,7 +350,6 @@ void ath_beacon_tasklet(unsigned long data)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
- struct ath_wiphy *aphy;
int slot;
u32 bfaddr, bc = 0, tsftu;
u64 tsf;
@@ -420,7 +407,6 @@ void ath_beacon_tasklet(unsigned long data)
*/
slot = ATH_BCBUF - slot - 1;
vif = sc->beacon.bslot[slot];
- aphy = sc->beacon.bslot_aphy[slot];
ath_dbg(common, ATH_DBG_BEACON,
"slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
@@ -428,7 +414,7 @@ void ath_beacon_tasklet(unsigned long data)
bfaddr = 0;
if (vif) {
- bf = ath_beacon_generate(aphy->hw, vif);
+ bf = ath_beacon_generate(sc->hw, vif);
if (bf != NULL) {
bfaddr = bf->bf_daddr;
bc = 1;
@@ -720,10 +706,10 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
iftype = sc->sc_ah->opmode;
}
- cur_conf->listen_interval = 1;
- cur_conf->dtim_count = 1;
- cur_conf->bmiss_timeout =
- ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
+ cur_conf->listen_interval = 1;
+ cur_conf->dtim_count = 1;
+ cur_conf->bmiss_timeout =
+ ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
/*
* It looks like mac80211 may end up using beacon interval of zero in
@@ -735,8 +721,9 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
cur_conf->beacon_interval = 100;
/*
- * Some times we dont parse dtim period from mac80211, in that case
- * use a default value
+ * We don't parse the dtim period from mac80211 during driver
+ * initialization, as doing so breaks association with hidden-SSID
+ * APs and adds latency when roaming
*/
if (cur_conf->dtim_period == 0)
cur_conf->dtim_period = 1;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index b68a1ac..b4a92a4 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -382,9 +382,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
s16 default_nf;
int i, j;
- if (!ah->caldata)
- return;
-
+ ah->caldata->channel = chan->channel;
+ ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
for (i = 0; i < NUM_NF_READINGS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index df1998d..615e682 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -189,6 +189,17 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
}
EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp);
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+ u16 new_txpow, u16 *txpower)
+{
+ if (cur_txpow != new_txpow) {
+ ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
+ /* read back in case value is clamped */
+ *txpower = ath9k_hw_regulatory(ah)->power_limit;
+ }
+}
+EXPORT_SYMBOL(ath9k_cmn_update_txpow);
+
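A sketch of how a driver path might call the new helper; new_limit stands in for whatever limit the caller computed (illustrative):

	/* clamp-aware update of the cached tx power (hypothetical call site) */
	ath9k_cmn_update_txpow(sc->sc_ah, sc->curtxpow, new_limit, &sc->curtxpow);

The read-back matters because ath9k_hw_set_txpowerlimit() may clamp the requested value to the regulatory limit.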
static int __init ath9k_cmn_init(void)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index a126bdd..b2f7b5f 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,8 +23,6 @@
/* Common header for Atheros 802.11n base driver cores */
-#define IEEE80211_WEP_NKID 4
-
#define WME_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -70,3 +68,5 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
int ath9k_cmn_count_streams(unsigned int chainmask, int max);
void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
enum ath_stomp_type stomp_type);
+void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
+ u16 new_txpow, u16 *txpower);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 3586c43..5cfcf8c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -381,41 +381,40 @@ static const struct file_operations fops_interrupt = {
.llseek = default_llseek,
};
-static const char * ath_wiphy_state_str(enum ath_wiphy_state state)
+static const char *channel_type_str(enum nl80211_channel_type t)
{
- switch (state) {
- case ATH_WIPHY_INACTIVE:
- return "INACTIVE";
- case ATH_WIPHY_ACTIVE:
- return "ACTIVE";
- case ATH_WIPHY_PAUSING:
- return "PAUSING";
- case ATH_WIPHY_PAUSED:
- return "PAUSED";
- case ATH_WIPHY_SCAN:
- return "SCAN";
+ switch (t) {
+ case NL80211_CHAN_NO_HT:
+ return "no ht";
+ case NL80211_CHAN_HT20:
+ return "ht20";
+ case NL80211_CHAN_HT40MINUS:
+ return "ht40-";
+ case NL80211_CHAN_HT40PLUS:
+ return "ht40+";
+ default:
+ return "???";
}
- return "?";
}
static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
- struct ath_wiphy *aphy = sc->pri_wiphy;
- struct ieee80211_channel *chan = aphy->hw->conf.channel;
+ struct ieee80211_channel *chan = sc->hw->conf.channel;
+ struct ieee80211_conf *conf = &(sc->hw->conf);
char buf[512];
unsigned int len = 0;
- int i;
u8 addr[ETH_ALEN];
u32 tmp;
len += snprintf(buf + len, sizeof(buf) - len,
- "primary: %s (%s chan=%d ht=%d)\n",
- wiphy_name(sc->pri_wiphy->hw->wiphy),
- ath_wiphy_state_str(sc->pri_wiphy->state),
+ "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
+ wiphy_name(sc->hw->wiphy),
ieee80211_frequency_to_channel(chan->center_freq),
- aphy->chan_is_ht);
+ chan->center_freq,
+ conf->channel_type,
+ channel_type_str(conf->channel_type));
put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
@@ -457,156 +456,82 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
else
len += snprintf(buf + len, sizeof(buf) - len, "\n");
- /* Put variable-length stuff down here, and check for overflows. */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i];
- if (aphy_tmp == NULL)
- continue;
- chan = aphy_tmp->hw->conf.channel;
- len += snprintf(buf + len, sizeof(buf) - len,
- "secondary: %s (%s chan=%d ht=%d)\n",
- wiphy_name(aphy_tmp->hw->wiphy),
- ath_wiphy_state_str(aphy_tmp->state),
- ieee80211_frequency_to_channel(chan->center_freq),
- aphy_tmp->chan_is_ht);
- }
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static struct ath_wiphy * get_wiphy(struct ath_softc *sc, const char *name)
-{
- int i;
- if (strcmp(name, wiphy_name(sc->pri_wiphy->hw->wiphy)) == 0)
- return sc->pri_wiphy;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy && strcmp(name, wiphy_name(aphy->hw->wiphy)) == 0)
- return aphy;
- }
- return NULL;
-}
-
-static int del_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_del(aphy);
-}
-
-static int pause_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_pause(aphy);
-}
-
-static int unpause_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_unpause(aphy);
-}
-
-static int select_wiphy(struct ath_softc *sc, const char *name)
-{
- struct ath_wiphy *aphy = get_wiphy(sc, name);
- if (!aphy)
- return -ENOENT;
- return ath9k_wiphy_select(aphy);
-}
-
-static int schedule_wiphy(struct ath_softc *sc, const char *msec)
-{
- ath9k_wiphy_set_scheduler(sc, simple_strtoul(msec, NULL, 0));
- return 0;
-}
-
-static ssize_t write_file_wiphy(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- char buf[50];
- size_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
- if (len > 0 && buf[len - 1] == '\n')
- buf[len - 1] = '\0';
-
- if (strncmp(buf, "add", 3) == 0) {
- int res = ath9k_wiphy_add(sc);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "del=", 4) == 0) {
- int res = del_wiphy(sc, buf + 4);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "pause=", 6) == 0) {
- int res = pause_wiphy(sc, buf + 6);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "unpause=", 8) == 0) {
- int res = unpause_wiphy(sc, buf + 8);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "select=", 7) == 0) {
- int res = select_wiphy(sc, buf + 7);
- if (res < 0)
- return res;
- } else if (strncmp(buf, "schedule=", 9) == 0) {
- int res = schedule_wiphy(sc, buf + 9);
- if (res < 0)
- return res;
- } else
- return -EOPNOTSUPP;
-
- return count;
-}
-
static const struct file_operations fops_wiphy = {
.read = read_file_wiphy,
- .write = write_file_wiphy,
.open = ath9k_debugfs_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
+#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
#define PR(str, elem) \
do { \
len += snprintf(buf + len, size - len, \
"%s%13u%11u%10u%10u\n", str, \
- sc->debug.stats.txstats[WME_AC_BE].elem, \
- sc->debug.stats.txstats[WME_AC_BK].elem, \
- sc->debug.stats.txstats[WME_AC_VI].elem, \
- sc->debug.stats.txstats[WME_AC_VO].elem); \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
+ if (len >= size) \
+ goto done; \
+} while (0)
+
+#define PRX(str, elem) \
+do { \
+ len += snprintf(buf + len, size - len, \
+ "%s%13u%11u%10u%10u\n", str, \
+ (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem), \
+ (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem), \
+ (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem), \
+ (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem)); \
+ if (len >= size) \
+ goto done; \
} while (0)
+#define PRQLE(str, elem) \
+do { \
+ len += snprintf(buf + len, size - len, \
+ "%s%13i%11i%10i%10i\n", str, \
+ list_empty(&sc->tx.txq_map[WME_AC_BE]->elem), \
+ list_empty(&sc->tx.txq_map[WME_AC_BK]->elem), \
+ list_empty(&sc->tx.txq_map[WME_AC_VI]->elem), \
+ list_empty(&sc->tx.txq_map[WME_AC_VO]->elem)); \
+ if (len >= size) \
+ goto done; \
+} while (0)
+
static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
char *buf;
- unsigned int len = 0, size = 2048;
+ unsigned int len = 0, size = 8000;
+ int i;
ssize_t retval = 0;
+ char tmp[32];
buf = kzalloc(size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO");
+ len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x"
+ " poll-work-seen: %u\n"
+ "%30s %10s%10s%10s\n\n",
+ ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
+ sc->tx_complete_poll_work_seen,
+ "BE", "BK", "VI", "VO");
PR("MPDUs Queued: ", queued);
PR("MPDUs Completed: ", completed);
PR("Aggregates: ", a_aggr);
- PR("AMPDUs Queued: ", a_queued);
+ PR("AMPDUs Queued HW:", a_queued_hw);
+ PR("AMPDUs Queued SW:", a_queued_sw);
PR("AMPDUs Completed:", a_completed);
PR("AMPDUs Retried: ", a_retries);
PR("AMPDUs XRetried: ", a_xretries);
@@ -618,6 +543,223 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("DELIM Underrun: ", delim_underrun);
PR("TX-Pkts-All: ", tx_pkts_all);
PR("TX-Bytes-All: ", tx_bytes_all);
+ PR("hw-put-tx-buf: ", puttxbuf);
+ PR("hw-tx-start: ", txstart);
+ PR("hw-tx-proc-desc: ", txprocdesc);
+ len += snprintf(buf + len, size - len,
+ "%s%11p%11p%10p%10p\n", "txq-memory-address:",
+ &(sc->tx.txq_map[WME_AC_BE]),
+ &(sc->tx.txq_map[WME_AC_BK]),
+ &(sc->tx.txq_map[WME_AC_VI]),
+ &(sc->tx.txq_map[WME_AC_VO]));
+ if (len >= size)
+ goto done;
+
+ PRX("axq-qnum: ", axq_qnum);
+ PRX("axq-depth: ", axq_depth);
+ PRX("axq-ampdu_depth: ", axq_ampdu_depth);
+ PRX("axq-stopped ", stopped);
+ PRX("tx-in-progress ", axq_tx_inprogress);
+ PRX("pending-frames ", pending_frames);
+ PRX("txq_headidx: ", txq_headidx);
+ PRX("txq_tailidx: ", txq_headidx);
+
+ PRQLE("axq_q empty: ", axq_q);
+ PRQLE("axq_acq empty: ", axq_acq);
+ PRQLE("txq_fifo_pending: ", txq_fifo_pending);
+ for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
+ snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
+ PRQLE(tmp, txq_fifo[i]);
+ }
+
+ /* Print out more detailed queue-info */
+ for (i = 0; i <= WME_AC_BK; i++) {
+ struct ath_txq *txq = &(sc->tx.txq[i]);
+ struct ath_atx_ac *ac;
+ struct ath_atx_tid *tid;
+ if (len >= size)
+ goto done;
+ spin_lock_bh(&txq->axq_lock);
+ if (!list_empty(&txq->axq_acq)) {
+ ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
+ list);
+ len += snprintf(buf + len, size - len,
+ "txq[%i] first-ac: %p sched: %i\n",
+ i, ac, ac->sched);
+ if (list_empty(&ac->tid_q) || (len >= size))
+ goto done_for;
+ tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+ list);
+ len += snprintf(buf + len, size - len,
+ " first-tid: %p sched: %i paused: %i\n",
+ tid, tid->sched, tid->paused);
+ }
+ done_for:
+ spin_unlock_bh(&txq->axq_lock);
+ }
+
+done:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t read_file_stations(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 64000;
+ struct ath_node *an = NULL;
+ ssize_t retval = 0;
+ int q;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, size - len,
+ "Stations:\n"
+ " tid: addr sched paused buf_q-empty an ac\n"
+ " ac: addr sched tid_q-empty txq\n");
+
+ spin_lock(&sc->nodes_lock);
+ list_for_each_entry(an, &sc->nodes, list) {
+ len += snprintf(buf + len, size - len,
+ "%pM\n", an->sta->addr);
+ if (len >= size)
+ goto done;
+
+ for (q = 0; q < WME_NUM_TID; q++) {
+ struct ath_atx_tid *tid = &(an->tid[q]);
+ len += snprintf(buf + len, size - len,
+ " tid: %p %s %s %i %p %p\n",
+ tid, tid->sched ? "sched" : "idle",
+ tid->paused ? "paused" : "running",
+ list_empty(&tid->buf_q),
+ tid->an, tid->ac);
+ if (len >= size)
+ goto done;
+ }
+
+ for (q = 0; q < WME_NUM_AC; q++) {
+ struct ath_atx_ac *ac = &(an->ac[q]);
+ len += snprintf(buf + len, size - len,
+ " ac: %p %s %i %p\n",
+ ac, ac->sched ? "sched" : "idle",
+ list_empty(&ac->tid_q), ac->txq);
+ if (len >= size)
+ goto done;
+ }
+ }
+
+done:
+ spin_unlock(&sc->nodes_lock);
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static ssize_t read_file_misc(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hw *hw = sc->hw;
+ char *buf;
+ unsigned int len = 0, size = 8000;
+ ssize_t retval = 0;
+ const char *tmp;
+ unsigned int reg;
+ struct ath9k_vif_iter_data iter_data;
+
+ ath9k_calculate_iter_data(hw, NULL, &iter_data);
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ switch (sc->sc_ah->opmode) {
+ case NL80211_IFTYPE_ADHOC:
+ tmp = "ADHOC";
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ tmp = "MESH";
+ break;
+ case NL80211_IFTYPE_AP:
+ tmp = "AP";
+ break;
+ case NL80211_IFTYPE_STATION:
+ tmp = "STATION";
+ break;
+ default:
+ tmp = "???";
+ break;
+ }
+
+ len += snprintf(buf + len, size - len,
+ "curbssid: %pM\n"
+ "OP-Mode: %s(%i)\n"
+ "Beacon-Timer-Register: 0x%x\n",
+ common->curbssid,
+ tmp, (int)(sc->sc_ah->opmode),
+ REG_READ(ah, AR_BEACON_PERIOD));
+
+ reg = REG_READ(ah, AR_TIMER_MODE);
+ len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (",
+ reg);
+ if (reg & AR_TBTT_TIMER_EN)
+ len += snprintf(buf + len, size - len, "TBTT ");
+ if (reg & AR_DBA_TIMER_EN)
+ len += snprintf(buf + len, size - len, "DBA ");
+ if (reg & AR_SWBA_TIMER_EN)
+ len += snprintf(buf + len, size - len, "SWBA ");
+ if (reg & AR_HCF_TIMER_EN)
+ len += snprintf(buf + len, size - len, "HCF ");
+ if (reg & AR_TIM_TIMER_EN)
+ len += snprintf(buf + len, size - len, "TIM ");
+ if (reg & AR_DTIM_TIMER_EN)
+ len += snprintf(buf + len, size - len, "DTIM ");
+ len += snprintf(buf + len, size - len, ")\n");
+
+ reg = sc->sc_ah->imask;
+ len += snprintf(buf + len, size - len, "imask: 0x%x (", reg);
+ if (reg & ATH9K_INT_SWBA)
+ len += snprintf(buf + len, size - len, "SWBA ");
+ if (reg & ATH9K_INT_BMISS)
+ len += snprintf(buf + len, size - len, "BMISS ");
+ if (reg & ATH9K_INT_CST)
+ len += snprintf(buf + len, size - len, "CST ");
+ if (reg & ATH9K_INT_RX)
+ len += snprintf(buf + len, size - len, "RX ");
+ if (reg & ATH9K_INT_RXHP)
+ len += snprintf(buf + len, size - len, "RXHP ");
+ if (reg & ATH9K_INT_RXLP)
+ len += snprintf(buf + len, size - len, "RXLP ");
+ if (reg & ATH9K_INT_BB_WATCHDOG)
+ len += snprintf(buf + len, size - len, "BB_WATCHDOG ");
+ /* other IRQ bits could be added here if needed */
+ len += snprintf(buf + len, size - len, ")\n");
+
+ len += snprintf(buf + len, size - len,
+ "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i"
+ " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n",
+ iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+ iter_data.nwds, iter_data.nadhocs, iter_data.nothers,
+ sc->nvifs, sc->nbcnvifs);
+
+ len += snprintf(buf + len, size - len,
+ "Calculated-BSSID-Mask: %pM\n",
+ iter_data.mask);
if (len > size)
len = size;
@@ -629,9 +771,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
}
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts)
+ struct ath_tx_status *ts, struct ath_txq *txq)
{
- int qnum = skb_get_queue_mapping(bf->bf_mpdu);
+ int qnum = txq->axq_qnum;
TX_STAT_INC(qnum, tx_pkts_all);
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
@@ -666,6 +808,20 @@ static const struct file_operations fops_xmit = {
.llseek = default_llseek,
};
+static const struct file_operations fops_stations = {
+ .read = read_file_stations,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_misc = {
+ .read = read_file_misc,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static ssize_t read_file_recv(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -903,6 +1059,14 @@ int ath9k_init_debug(struct ath_hw *ah)
sc, &fops_xmit))
goto err;
+ if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_stations))
+ goto err;
+
+ if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy,
+ sc, &fops_misc))
+ goto err;
+
if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
sc, &fops_recv))
goto err;
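
All of the new debugfs readers above (xmit, stations, misc) share one pattern: snprintf() into a single kzalloc'd buffer, jump to a done label as soon as len reaches size, clamp len, then hand the result to simple_read_from_buffer(). The following minimal userspace sketch shows just that pattern, not ath9k API; it relies on snprintf() returning the length the output *would* have had, which is why len can run past size and must be clamped.

#include <stdio.h>
#include <stdlib.h>

/* Append lines with snprintf(), checking for overflow after every
 * append and clamping before the buffer is consumed. */
static unsigned int fill_buffer(char *buf, size_t size)
{
    unsigned int len = 0;
    int i;

    for (i = 0; i < 100; i++) {
        len += snprintf(buf + len, size - len, "queue %d: ok\n", i);
        if (len >= size)
            goto done;
    }
done:
    if (len > size)
        len = size;
    return len;
}

int main(void)
{
    size_t size = 256; /* deliberately small, like a full debugfs read */
    char *buf = calloc(1, size);

    if (!buf)
        return 1;
    printf("%.*s", (int)fill_buffer(buf, size), buf);
    free(buf);
    return 0;
}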
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 1e5078b..59338de 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -89,7 +89,8 @@ struct ath_interrupt_stats {
* @queued: Total MPDUs (non-aggr) queued
* @completed: Total MPDUs (non-aggr) completed
* @a_aggr: Total no. of aggregates queued
- * @a_queued: Total AMPDUs queued
+ * @a_queued_hw: Total AMPDUs queued to hardware
+ * @a_queued_sw: Total AMPDUs queued to software queues
* @a_completed: Total AMPDUs completed
* @a_retries: No. of AMPDUs retried (SW)
* @a_xretries: No. of AMPDUs dropped due to xretries
@@ -102,6 +103,9 @@ struct ath_interrupt_stats {
* @desc_cfg_err: Descriptor configuration errors
* @data_underrun: TX data underrun errors
* @delim_underrun: TX delimiter underrun errors
+ * @puttxbuf: Number of times a TX buffer was handed to the hardware
+ * @txstart: Number of times the hardware was told to start TX
+ * @txprocdesc: Number of times a TX descriptor was processed
*/
struct ath_tx_stats {
u32 tx_pkts_all;
@@ -109,7 +113,8 @@ struct ath_tx_stats {
u32 queued;
u32 completed;
u32 a_aggr;
- u32 a_queued;
+ u32 a_queued_hw;
+ u32 a_queued_sw;
u32 a_completed;
u32 a_retries;
u32 a_xretries;
@@ -119,6 +124,9 @@ struct ath_tx_stats {
u32 desc_cfg_err;
u32 data_underrun;
u32 delim_underrun;
+ u32 puttxbuf;
+ u32 txstart;
+ u32 txprocdesc;
};
/**
@@ -167,7 +175,7 @@ int ath9k_init_debug(struct ath_hw *ah);
void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_status *ts);
+ struct ath_tx_status *ts, struct ath_txq *txq);
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
#else
@@ -184,7 +192,8 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_buf *bf,
- struct ath_tx_status *ts)
+ struct ath_tx_status *ts,
+ struct ath_txq *txq)
{
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index d051631..8c18bed 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -89,6 +89,38 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
return false;
}
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+ int eep_start_loc, int size)
+{
+ int i = 0, j, addr;
+ u32 addrdata[8];
+ u32 data[8];
+
+ for (addr = 0; addr < size; addr++) {
+ addrdata[i] = AR5416_EEPROM_OFFSET +
+ ((addr + eep_start_loc) << AR5416_EEPROM_S);
+ i++;
+ if (i == 8) {
+ REG_READ_MULTI(ah, addrdata, data, i);
+
+ for (j = 0; j < i; j++) {
+ *eep_data = data[j];
+ eep_data++;
+ }
+ i = 0;
+ }
+ }
+
+ if (i != 0) {
+ REG_READ_MULTI(ah, addrdata, data, i);
+
+ for (j = 0; j < i; j++) {
+ *eep_data = data[j];
+ eep_data++;
+ }
+ }
+}
+
bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
{
return common->bus_ops->eeprom_read(common, off, data);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 58e2ddc..bd82447 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -665,6 +665,8 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
u16 *indexL, u16 *indexR);
bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
+void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
+ int eep_start_loc, int size);
void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
u8 *pVpdList, u16 numIntercepts,
u8 *pRetVpdList);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index fbdff7e..bc77a30 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -27,19 +27,13 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
}
-static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
-{
#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
+
+static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data = (u16 *)&ah->eeprom.map4k;
- int addr, eep_start_loc = 0;
-
- eep_start_loc = 64;
-
- if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
- }
+ int addr, eep_start_loc = 64;
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
@@ -51,9 +45,34 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
}
return true;
-#undef SIZE_EEPROM_4K
}
+static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.map4k;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K);
+
+ return true;
+}
+
+static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_4k_fill_eeprom(ah);
+ else
+ return __ath9k_hw_4k_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_4K
+
static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
{
#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 9b6bc8a..8cd8333 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -17,7 +17,7 @@
#include "hw.h"
#include "ar9002_phy.h"
-#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16))
+#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16))
static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
{
@@ -29,25 +29,15 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
}
-static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
{
struct ar9287_eeprom *eep = &ah->eeprom.map9287;
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data;
- int addr, eep_start_loc;
+ int addr, eep_start_loc = AR9287_EEP_START_LOC;
eep_data = (u16 *)eep;
- if (common->bus_ops->ath_bus_type == ATH_USB)
- eep_start_loc = AR9287_HTC_EEP_START_LOC;
- else
- eep_start_loc = AR9287_EEP_START_LOC;
-
- if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
- }
-
- for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+ for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
eep_data)) {
ath_dbg(common, ATH_DBG_EEPROM,
@@ -60,6 +50,31 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
return true;
}
+static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.map9287;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+ AR9287_HTC_EEP_START_LOC,
+ SIZE_EEPROM_AR9287);
+ return true;
+}
+
+static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_ar9287_fill_eeprom(ah);
+ else
+ return __ath9k_hw_ar9287_fill_eeprom(ah);
+}
+
static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
{
u32 sum = 0, el, integer;
@@ -86,7 +101,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
need_swap = true;
eepdata = (u16 *)(&ah->eeprom);
- for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
+ for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
temp = swab16(*eepdata);
*eepdata = temp;
eepdata++;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 749a936..fccd87d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -86,9 +86,10 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
}
-static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
-{
#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
+
+static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
struct ath_common *common = ath9k_hw_common(ah);
u16 *eep_data = (u16 *)&ah->eeprom.def;
int addr, ar5416_eep_start_loc = 0x100;
@@ -103,9 +104,34 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
eep_data++;
}
return true;
-#undef SIZE_EEPROM_DEF
}
+static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah)
+{
+ u16 *eep_data = (u16 *)&ah->eeprom.def;
+
+ ath9k_hw_usb_gen_fill_eeprom(ah, eep_data,
+ 0x100, SIZE_EEPROM_DEF);
+ return true;
+}
+
+static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!ath9k_hw_use_flash(ah)) {
+ ath_dbg(common, ATH_DBG_EEPROM,
+ "Reading from EEPROM, not flash\n");
+ }
+
+ if (common->bus_ops->ath_bus_type == ATH_USB)
+ return __ath9k_hw_usb_def_fill_eeprom(ah);
+ else
+ return __ath9k_hw_def_fill_eeprom(ah);
+}
+
+#undef SIZE_EEPROM_DEF
+
static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
{
struct ar5416_eeprom_def *eep =
@@ -221,9 +247,9 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
}
/* Enable fixup for AR_AN_TOP2 if necessary */
- if (AR_SREV_9280_20_OR_LATER(ah) &&
- (eep->baseEepHeader.version & 0xff) > 0x0a &&
- eep->baseEepHeader.pwdclkind == 0)
+ if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
+ ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
+ (eep->baseEepHeader.pwdclkind == 0))
ah->need_an_top2_fixup = 1;
if ((common->bus_ops->ath_bus_type == ATH_USB) &&
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 1337640..fb4f17a 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -201,8 +201,7 @@ static bool ath_is_rfkill_set(struct ath_softc *sc)
void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
bool blocked = !!ath_is_rfkill_set(sc);
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 780ac5e..0cb504d 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -366,7 +366,7 @@ struct ath9k_htc_priv {
u16 seq_no;
u32 bmiss_cnt;
- struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS];
+ struct ath9k_hw_cal_data caldata;
spinlock_t beacon_lock;
@@ -460,7 +460,6 @@ void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
void ath9k_ps_work(struct work_struct *work);
bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
enum ath9k_power_mode mode);
-void ath_update_txpow(struct ath9k_htc_priv *priv);
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index fe70f67..7e630a8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -389,7 +389,8 @@ void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
ret, ah->curchan->channel);
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
/* Start RX */
WMI_CMD(WMI_START_RECV_CMDID);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 0352f09..a7bc26d 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -294,6 +294,34 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
return be32_to_cpu(val);
}
+static void ath9k_multi_regread(void *hw_priv, u32 *addr,
+ u32 *val, u16 count)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
+ __be32 tmpaddr[8];
+ __be32 tmpval[8];
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ tmpaddr[i] = cpu_to_be32(addr[i]);
+ }
+
+ ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ (u8 *)tmpaddr , sizeof(u32) * count,
+ (u8 *)tmpval, sizeof(u32) * count,
+ 100);
+ if (unlikely(ret)) {
+ ath_dbg(common, ATH_DBG_WMI,
+ "Multiple REGISTER READ FAILED (count: %d)\n", count);
+ }
+
+ for (i = 0; i < count; i++) {
+ val[i] = be32_to_cpu(tmpval[i]);
+ }
+}
+
static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -404,6 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
static const struct ath_ops ath9k_common_ops = {
.read = ath9k_regread,
+ .multi_read = ath9k_multi_regread,
.write = ath9k_regwrite,
.enable_write_buffer = ath9k_enable_regwrite_buffer,
.write_flush = ath9k_regwrite_flush,
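
ath9k_multi_regread() converts the address array to big-endian before issuing the WMI command and converts the returned words back to host order afterwards; its on-stack buffers hold eight entries, matching the eight-word batches issued by ath9k_hw_usb_gen_fill_eeprom(). A tiny userspace sketch of the byte-order round trip, using htonl()/ntohl() as stand-ins for the kernel's cpu_to_be32()/be32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h> /* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

int main(void)
{
    uint32_t addr[3] = { 0x784c, 0x7850, 0x9800 }; /* arbitrary offsets */
    uint32_t wire[3], val[3];
    int i;

    /* host -> wire order before handing the buffer to the firmware */
    for (i = 0; i < 3; i++)
        wire[i] = htonl(addr[i]);

    /* the firmware would overwrite 'wire' with register values;
     * here we simply convert the echoed words back to host order */
    for (i = 0; i < 3; i++)
        val[i] = ntohl(wire[i]);

    for (i = 0; i < 3; i++)
        printf("0x%08x\n", val[i]);
    return 0;
}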
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 6bb5995..953036a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,17 +24,6 @@ static struct dentry *ath9k_debugfs_root;
/* Utilities */
/*************/
-void ath_update_txpow(struct ath9k_htc_priv *priv)
-{
- struct ath_hw *ah = priv->ah;
-
- if (priv->curtxpow != priv->txpowlimit) {
- ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false);
- /* read back in case value is clamped */
- priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
- }
-}
-
/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
struct ath9k_channel *ichan)
@@ -121,7 +110,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
struct ath_hw *ah = priv->ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *channel = priv->hw->conf.channel;
- struct ath9k_hw_cal_data *caldata;
+ struct ath9k_hw_cal_data *caldata = NULL;
enum htc_phymode mode;
__be16 htc_mode;
u8 cmd_rsp;
@@ -139,7 +128,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
WMI_CMD(WMI_STOP_RECV_CMDID);
- caldata = &priv->caldata[channel->hw_value];
+ caldata = &priv->caldata;
ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
if (ret) {
ath_err(common,
@@ -147,7 +136,8 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv)
channel->center_freq, ret);
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
WMI_CMD(WMI_START_RECV_CMDID);
ath9k_host_rx_init(priv);
@@ -202,7 +192,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
fastcc);
- caldata = &priv->caldata[channel->hw_value];
+ if (!fastcc)
+ caldata = &priv->caldata;
ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
if (ret) {
ath_err(common,
@@ -211,7 +202,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
goto err;
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
WMI_CMD(WMI_START_RECV_CMDID);
if (ret)
@@ -987,7 +979,8 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
return ret;
}
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
+ &priv->curtxpow);
mode = ath9k_htc_get_curmode(priv, init_channel);
htc_mode = cpu_to_be16(mode);
@@ -1051,6 +1044,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
cancel_work_sync(&priv->fatal_work);
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+ cancel_delayed_work_sync(&priv->ath9k_ani_work);
ath9k_led_stop_brightness(priv);
mutex_lock(&priv->mutex);
@@ -1252,7 +1246,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_POWER) {
priv->txpowlimit = 2 * conf->power_level;
- ath_update_txpow(priv);
+ ath9k_cmn_update_txpow(priv->ah, priv->curtxpow,
+ priv->txpowlimit, &priv->curtxpow);
}
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
@@ -1557,7 +1552,7 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+ u16 tid, u16 *ssn, u8 buf_size)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath9k_htc_sta *ista;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 9f01e50..f9cf815 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -495,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (ah->hw_version.devid == AR5416_AR9100_DEVID)
ah->hw_version.macVersion = AR_SREV_VERSION_9100;
+ ath9k_hw_read_revisions(ah);
+
+ /*
+ * Read back AR_WA into a permanent copy and set bits 14 and 17.
+ * We need to do this to avoid RMW of this register. We cannot
+ * read the reg when chip is asleep.
+ */
+ ah->WARegVal = REG_READ(ah, AR_WA);
+ ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+ AR_WA_ASPM_TIMER_BASED_DISABLE);
+
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_err(common, "Couldn't reset chip\n");
return -EIO;
@@ -563,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_mode_regs(ah);
- /*
- * Read back AR_WA into a permanent copy and set bits 14 and 17.
- * We need to do this to avoid RMW of this register. We cannot
- * read the reg when chip is asleep.
- */
- ah->WARegVal = REG_READ(ah, AR_WA);
- ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
- AR_WA_ASPM_TIMER_BASED_DISABLE);
if (ah->is_pciexpress)
ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -668,14 +671,51 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
REGWRITE_BUFFER_FLUSH(ah);
}
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
+{
+ REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK)));
+ udelay(100);
+ REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK));
+
+ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+ udelay(100);
+
+ return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
+}
+EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
+
+#define DPLL2_KD_VAL 0x3D
+#define DPLL2_KI_VAL 0x06
+#define DPLL3_PHASE_SHIFT_VAL 0x1
+
static void ath9k_hw_init_pll(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 pll;
- if (AR_SREV_9485(ah))
+ if (AR_SREV_9485(ah)) {
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
+ REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01);
+
+ REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
+ AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
+ udelay(100);
+
REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_DPLL2_KD, DPLL2_KD_VAL);
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
+ AR_CH0_DPLL2_KI, DPLL2_KI_VAL);
+
+ REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
+ AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL);
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c);
+ udelay(110);
+ }
+
pll = ath9k_hw_compute_pll_control(ah, chan);
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
@@ -1082,8 +1122,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
return false;
}
- ath9k_hw_read_revisions(ah);
-
return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
}
@@ -1348,8 +1386,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
- ath9k_hw_set_operating_mode(ah, ah->opmode);
-
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
@@ -1367,6 +1403,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REGWRITE_BUFFER_FLUSH(ah);
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+
r = ath9k_hw_rf_set_freq(ah, chan);
if (r)
return r;
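
Moving the AR_WA read to the top of __ath9k_hw_init() supports a cached-shadow pattern: the register cannot be read while the chip sleeps, so the driver reads it once while the chip is known awake, ORs in the required bits, and thereafter writes only the cached ah->WARegVal instead of doing read-modify-write. A minimal sketch of that pattern against a simulated register; the bit positions below only illustrate AR_WA_D3_L1_DISABLE and AR_WA_ASPM_TIMER_BASED_DISABLE.

#include <stdio.h>

#define WA_D3_L1_DISABLE      (1u << 14) /* illustrative bit positions */
#define WA_ASPM_TIMER_DISABLE (1u << 17)

static unsigned int hw_reg = 0xa5a5; /* fake power-on AR_WA contents */
static unsigned int wa_shadow;       /* plays the role of ah->WARegVal */

/* One read, performed while the chip is known to be awake. */
static void chip_init(void)
{
    wa_shadow = hw_reg | WA_D3_L1_DISABLE | WA_ASPM_TIMER_DISABLE;
}

/* Later writers only ever emit the shadow copy: no read-modify-write,
 * so this is safe even when a read would return garbage. */
static void apply_workarounds(void)
{
    hw_reg = wa_shadow;
}

int main(void)
{
    chip_init();
    apply_workarounds();
    printf("AR_WA-style register: 0x%08x\n", hw_reg);
    return 0;
}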
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index ea9fde6..ef79f4c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -70,6 +70,9 @@
#define REG_READ(_ah, _reg) \
ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
+#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \
+ ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt))
+
#define ENABLE_REGWRITE_BUFFER(_ah) \
do { \
if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \
@@ -926,6 +929,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
+unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 087a6a9..e5c1eea 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -254,8 +254,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
return ath_reg_notifier_apply(wiphy, request, reg);
@@ -442,9 +441,10 @@ static int ath9k_init_queues(struct ath_softc *sc)
sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
ath_cabq_update(sc);
- for (i = 0; i < WME_NUM_AC; i++)
+ for (i = 0; i < WME_NUM_AC; i++) {
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
-
+ sc->tx.txq_map[i]->mac80211_qnum = i;
+ }
return 0;
}
@@ -516,10 +516,8 @@ static void ath9k_init_misc(struct ath_softc *sc)
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+ for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
@@ -537,6 +535,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
if (!ah)
return -ENOMEM;
+ ah->hw = sc->hw;
ah->hw_version.devid = devid;
ah->hw_version.subsysid = subsysid;
sc->sc_ah = ah;
@@ -554,10 +553,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
common->btcoex_enabled = ath9k_btcoex_enable == 1;
spin_lock_init(&common->cc_lock);
- spin_lock_init(&sc->wiphy_lock);
spin_lock_init(&sc->sc_serial_rw);
spin_lock_init(&sc->sc_pm_lock);
mutex_init(&sc->mutex);
+#ifdef CONFIG_ATH9K_DEBUGFS
+ spin_lock_init(&sc->nodes_lock);
+ INIT_LIST_HEAD(&sc->nodes);
+#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
(unsigned long)sc);
@@ -699,7 +701,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops)
{
struct ieee80211_hw *hw = sc->hw;
- struct ath_wiphy *aphy = hw->priv;
struct ath_common *common;
struct ath_hw *ah;
int error = 0;
@@ -754,10 +755,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
INIT_WORK(&sc->hw_check_work, ath_hw_check);
INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
- INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
- INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(500);
- aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
@@ -812,7 +810,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
void ath9k_deinit_device(struct ath_softc *sc)
{
struct ieee80211_hw *hw = sc->hw;
- int i = 0;
ath9k_ps_wakeup(sc);
@@ -821,21 +818,11 @@ void ath9k_deinit_device(struct ath_softc *sc)
ath9k_ps_restore(sc);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- sc->sec_wiphy[i] = NULL;
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- }
-
ieee80211_unregister_hw(hw);
pm_qos_remove_request(&sc->pm_qos_req);
ath_rx_cleanup(sc);
ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
- kfree(sc->sec_wiphy);
}
void ath_descdma_cleanup(struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 180170d..c75d40f 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -690,17 +690,23 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
+ /*
+ * Treat these errors as mutually exclusive to avoid spurious
+ * extra error reports from the hardware. If a CRC error is
+ * reported, then decryption and MIC errors are irrelevant;
+ * the frame is going to be dropped either way.
+ */
if (ads.ds_rxstatus8 & AR_CRCErr)
rs->rs_status |= ATH9K_RXERR_CRC;
- if (ads.ds_rxstatus8 & AR_PHYErr) {
+ else if (ads.ds_rxstatus8 & AR_PHYErr) {
rs->rs_status |= ATH9K_RXERR_PHY;
phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
rs->rs_phyerr = phyerr;
- }
- if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
+ } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
- if (ads.ds_rxstatus8 & AR_MichaelErr)
+ else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
+
if (ads.ds_rxstatus8 & AR_KeyMiss)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
}
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 7512f97..04d58ae 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -639,6 +639,8 @@ enum ath9k_rx_filter {
ATH9K_RX_FILTER_PHYERR = 0x00000100,
ATH9K_RX_FILTER_MYBEACON = 0x00000200,
ATH9K_RX_FILTER_COMP_BAR = 0x00000400,
+ ATH9K_RX_FILTER_COMP_BA = 0x00000800,
+ ATH9K_RX_FILTER_UNCOMP_BA_BAR = 0x00001000,
ATH9K_RX_FILTER_PSPOLL = 0x00004000,
ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index da5c645..4ed43b2 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,17 +18,6 @@
#include "ath9k.h"
#include "btcoex.h"
-static void ath_update_txpow(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (sc->curtxpow != sc->config.txpowlimit) {
- ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
- /* read back in case value is clamped */
- sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
- }
-}
-
static u8 parse_mpdudensity(u8 mpdudensity)
{
/*
@@ -64,19 +53,6 @@ static u8 parse_mpdudensity(u8 mpdudensity)
}
}
-static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
- struct ieee80211_hw *hw)
-{
- struct ieee80211_channel *curchan = hw->conf.channel;
- struct ath9k_channel *channel;
- u8 chan_idx;
-
- chan_idx = curchan->hw_value;
- channel = &sc->sc_ah->channels[chan_idx];
- ath9k_update_ichannel(sc, hw, channel);
- return channel;
-}
-
bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
{
unsigned long flags;
@@ -177,7 +153,12 @@ static void ath_update_survey_nf(struct ath_softc *sc, int channel)
}
}
-static void ath_update_survey_stats(struct ath_softc *sc)
+/*
+ * Updates the survey statistics and returns the channel busy time since
+ * the last update as a percentage, or -1 if the measurement duration
+ * was too short for the result to be useful.
+ */
+static int ath_update_survey_stats(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -185,9 +166,10 @@ static void ath_update_survey_stats(struct ath_softc *sc)
struct survey_info *survey = &sc->survey[pos];
struct ath_cycle_counters *cc = &common->cc_survey;
unsigned int div = common->clockrate * 1000;
+ int ret = 0;
if (!ah->curchan)
- return;
+ return -1;
if (ah->power_mode == ATH9K_PM_AWAKE)
ath_hw_cycle_counters_update(common);
@@ -202,9 +184,18 @@ static void ath_update_survey_stats(struct ath_softc *sc)
survey->channel_time_rx += cc->rx_frame / div;
survey->channel_time_tx += cc->tx_frame / div;
}
+
+ if (cc->cycles < div)
+ return -1;
+
+ if (cc->cycles > 0)
+ ret = cc->rx_busy * 100 / cc->cycles;
+
memset(cc, 0, sizeof(*cc));
ath_update_survey_nf(sc, pos);
+
+ return ret;
}
/*
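
With div = clockrate (in MHz) * 1000, cc->cycles / div is elapsed time in milliseconds, so the cc->cycles < div test rejects samples shorter than one millisecond, and the busy figure is rx_busy * 100 / cycles. A worked sketch of the arithmetic with made-up cycle counts:

#include <stdio.h>

/* Mirrors the busy computation above: channel busy time in percent,
 * or -1 when fewer than 'div' cycles (one millisecond) were sampled. */
static int busy_percent(unsigned long cycles, unsigned long rx_busy,
                        unsigned long div)
{
    if (cycles < div)
        return -1;
    return (int)(rx_busy * 100 / cycles);
}

int main(void)
{
    unsigned long div = 44 * 1000; /* 44 MHz clock: cycles per ms */

    /* 10 ms worth of cycles, 2.2 ms of them busy -> 22% */
    printf("%d%%\n", busy_percent(440000, 96800, div));
    /* sample shorter than 1 ms -> -1 */
    printf("%d\n", busy_percent(20000, 19000, div));
    return 0;
}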
@@ -215,7 +206,6 @@ static void ath_update_survey_stats(struct ath_softc *sc)
int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *hchan)
{
- struct ath_wiphy *aphy = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &common->hw->conf;
@@ -227,10 +217,13 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (sc->sc_flags & SC_OP_INVALID)
return -EIO;
+ sc->hw_busy_count = 0;
+
del_timer_sync(&common->ani.timer);
cancel_work_sync(&sc->paprd_work);
cancel_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_delayed_work_sync(&sc->hw_pll_work);
ath9k_ps_wakeup(sc);
@@ -251,6 +244,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
if (!ath_stoprecv(sc))
stopped = false;
+ if (!ath9k_hw_check_alive(ah))
+ stopped = false;
+
/* XXX: do not flush receive queue here. We don't want
* to flush data frames already in queue because of
* changing channel. */
@@ -259,7 +255,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
fastcc = false;
if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
- caldata = &aphy->caldata;
+ caldata = &sc->caldata;
ath_dbg(common, ATH_DBG_CONFIG,
"(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
@@ -281,17 +277,21 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
goto ps_restore;
}
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
ath9k_hw_set_interrupts(ah, ah->imask);
if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
if (sc->sc_flags & SC_OP_BEACONS)
ath_beacon_config(sc, NULL);
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
ath_start_ani(common);
}
ps_restore:
+ ieee80211_wake_queues(hw);
+
spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
@@ -549,6 +549,12 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
struct ath_hw *ah = sc->sc_ah;
an = (struct ath_node *)sta->drv_priv;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ spin_lock(&sc->nodes_lock);
+ list_add(&an->list, &sc->nodes);
+ spin_unlock(&sc->nodes_lock);
+ an->sta = sta;
+#endif
if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
sc->sc_flags |= SC_OP_ENABLE_APM;
@@ -564,6 +570,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ spin_lock(&sc->nodes_lock);
+ list_del(&an->list);
+ spin_unlock(&sc->nodes_lock);
+ an->sta = NULL;
+#endif
+
if (sc->sc_flags & SC_OP_TXAGGR)
ath_tx_node_cleanup(sc, an);
}
@@ -571,17 +584,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
void ath_hw_check(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
- int i;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long flags;
+ int busy;
ath9k_ps_wakeup(sc);
+ if (ath9k_hw_check_alive(sc->sc_ah))
+ goto out;
- for (i = 0; i < 3; i++) {
- if (ath9k_hw_check_alive(sc->sc_ah))
- goto out;
+ spin_lock_irqsave(&common->cc_lock, flags);
+ busy = ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
- msleep(1);
- }
- ath_reset(sc, true);
+ ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
+ "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+ if (busy >= 99) {
+ if (++sc->hw_busy_count >= 3)
+ ath_reset(sc, true);
+ } else if (busy >= 0)
+ sc->hw_busy_count = 0;
out:
ath9k_ps_restore(sc);
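
The reworked check replaces the old retry-three-times-then-reset loop with a heuristic: a reset is issued only when the baseband looks dead and the survey data has shown the channel at 99% busy or more on three consecutive checks; a too-short measurement (-1) leaves the strike counter untouched. A compact standalone model of that decision logic follows, with hypothetical inputs and the counter cleared on reset the way ath_reset() clears sc->hw_busy_count.

#include <stdio.h>

static int hw_busy_count; /* plays the role of sc->hw_busy_count */

/* Decide whether this check should trigger a chip reset, given whether
 * the MAC looks alive and the measured busy percentage. */
static int hw_check(int alive, int busy)
{
    if (alive)
        return 0;
    if (busy >= 99) {
        if (++hw_busy_count >= 3) {
            hw_busy_count = 0; /* ath_reset() clears the counter */
            return 1;          /* three strikes: reset */
        }
    } else if (busy >= 0) {
        hw_busy_count = 0;     /* channel not saturated: no hang */
    }
    return 0;
}

int main(void)
{
    const int checks[][2] = { {0, 99}, {0, 100}, {0, 99}, {1, 0} };
    int i;

    for (i = 0; i < 4; i++)
        printf("check %d: reset=%d\n", i,
               hw_check(checks[i][0], checks[i][1]));
    return 0;
}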
@@ -604,7 +625,15 @@ void ath9k_tasklet(unsigned long data)
ath9k_ps_wakeup(sc);
spin_lock(&sc->sc_pcu_lock);
- if (!ath9k_hw_check_alive(ah))
+ /*
+ * Only run the baseband hang check if beacons stop working in AP or
+ * IBSS mode, because it has a high false positive rate. For station
+ * mode it should not be necessary, since the upper layers will detect
+ * this through a beacon miss automatically and the following channel
+ * change will trigger a hardware reset anyway
+ */
+ if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
+ !ath9k_hw_check_alive(ah))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
@@ -783,54 +812,11 @@ chip_reset:
#undef SCHED_INTR
}
-static u32 ath_get_extchanmode(struct ath_softc *sc,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
-{
- u32 chanmode = 0;
-
- switch (chan->band) {
- case IEEE80211_BAND_2GHZ:
- switch(channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
- chanmode = CHANNEL_G_HT20;
- break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_G_HT40PLUS;
- break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_G_HT40MINUS;
- break;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- switch(channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
- chanmode = CHANNEL_A_HT20;
- break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_A_HT40PLUS;
- break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_A_HT40MINUS;
- break;
- }
- break;
- default:
- break;
- }
-
- return chanmode;
-}
-
static void ath9k_bss_assoc_info(struct ath_softc *sc,
struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
{
- struct ath_wiphy *aphy = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -854,7 +840,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
ath_beacon_config(sc, vif);
/* Reset rssi stats */
- aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_flags |= SC_OP_ANI_RUN;
@@ -881,7 +867,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_hw_configpcipowersave(ah, 0, 0);
if (!ah->curchan)
- ah->curchan = ath_get_curchannel(sc, sc->hw);
+ ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
if (r) {
@@ -890,7 +876,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
channel->center_freq, r);
}
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
if (ath_startrecv(sc) != 0) {
ath_err(common, "Unable to restart recv logic\n");
goto out;
@@ -942,7 +929,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath_flushrecv(sc); /* flush recv queue */
if (!ah->curchan)
- ah->curchan = ath_get_curchannel(sc, hw);
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
if (r) {
@@ -966,6 +953,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
struct ieee80211_hw *hw = sc->hw;
int r;
+ sc->hw_busy_count = 0;
+
/* Stop ANI */
del_timer_sync(&common->ani.timer);
@@ -993,7 +982,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
* that changes the channel so update any state that
* might change as a result.
*/
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL)))
ath_beacon_config(sc, NULL); /* restart beacons */
@@ -1021,38 +1011,13 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
return r;
}
-/* XXX: Remove me once we don't depend on ath9k_channel for all
- * this redundant data */
-void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
- struct ath9k_channel *ichan)
-{
- struct ieee80211_channel *chan = hw->conf.channel;
- struct ieee80211_conf *conf = &hw->conf;
-
- ichan->channel = chan->center_freq;
- ichan->chan = chan;
-
- if (chan->band == IEEE80211_BAND_2GHZ) {
- ichan->chanmode = CHANNEL_G;
- ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
- } else {
- ichan->chanmode = CHANNEL_A;
- ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
- }
-
- if (conf_is_ht(conf))
- ichan->chanmode = ath_get_extchanmode(sc, chan,
- conf->channel_type);
-}
-
/**********************/
/* mac80211 callbacks */
/**********************/
static int ath9k_start(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_channel *curchan = hw->conf.channel;
@@ -1065,32 +1030,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
mutex_lock(&sc->mutex);
- if (ath9k_wiphy_started(sc)) {
- if (sc->chan_idx == curchan->hw_value) {
- /*
- * Already on the operational channel, the new wiphy
- * can be marked active.
- */
- aphy->state = ATH_WIPHY_ACTIVE;
- ieee80211_wake_queues(hw);
- } else {
- /*
- * Another wiphy is on another channel, start the new
- * wiphy in paused state.
- */
- aphy->state = ATH_WIPHY_PAUSED;
- ieee80211_stop_queues(hw);
- }
- mutex_unlock(&sc->mutex);
- return 0;
- }
- aphy->state = ATH_WIPHY_ACTIVE;
-
/* setup initial channel */
-
sc->chan_idx = curchan->hw_value;
- init_channel = ath_get_curchannel(sc, hw);
+ init_channel = ath9k_cmn_get_curchannel(hw, ah);
/* Reset SERDES registers */
ath9k_hw_configpcipowersave(ah, 0, 0);
@@ -1116,7 +1059,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
* This is needed only to setup initial state
* but it's best done after a reset.
*/
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
/*
* Setup the hardware after reset:
@@ -1191,19 +1135,11 @@ mutex_unlock:
static int ath9k_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_tx_control txctl;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
- ath_dbg(common, ATH_DBG_XMIT,
- "ath9k: %s: TX in unexpected wiphy state %d\n",
- wiphy_name(hw->wiphy), aphy->state);
- goto exit;
- }
-
if (sc->ps_enabled) {
/*
* mac80211 does not set PM field for normal data frames, so we
@@ -1262,44 +1198,26 @@ exit:
static void ath9k_stop(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- int i;
mutex_lock(&sc->mutex);
- aphy->state = ATH_WIPHY_INACTIVE;
-
if (led_blink)
cancel_delayed_work_sync(&sc->ath_led_blink_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
+ cancel_delayed_work_sync(&sc->hw_pll_work);
cancel_work_sync(&sc->paprd_work);
cancel_work_sync(&sc->hw_check_work);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i])
- break;
- }
-
- if (i == sc->num_sec_wiphy) {
- cancel_delayed_work_sync(&sc->wiphy_work);
- cancel_work_sync(&sc->chan_work);
- }
-
if (sc->sc_flags & SC_OP_INVALID) {
ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
}
- if (ath9k_wiphy_started(sc)) {
- mutex_unlock(&sc->mutex);
- return; /* another wiphy still in use */
- }
-
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
@@ -1325,6 +1243,11 @@ static void ath9k_stop(struct ieee80211_hw *hw)
} else
sc->rx.rxlink = NULL;
+ if (sc->rx.frag) {
+ dev_kfree_skb_any(sc->rx.frag);
+ sc->rx.frag = NULL;
+ }
+
/* disable HAL and put h/w to sleep */
ath9k_hw_disable(ah);
ath9k_hw_configpcipowersave(ah, 1, 1);
@@ -1340,7 +1263,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_ps_restore(sc);
sc->ps_idle = true;
- ath9k_set_wiphy_idle(aphy, true);
ath_radio_disable(sc, hw);
sc->sc_flags |= SC_OP_INVALID;
@@ -1352,112 +1274,238 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
}
-static int ath9k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+bool ath9k_uses_beacons(int type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ath9k_reclaim_beacon(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
- enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
- int ret = 0;
- mutex_lock(&sc->mutex);
+ /* Disable SWBA interrupt */
+ sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
+ ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+ tasklet_kill(&sc->bcon_tasklet);
+ ath9k_ps_restore(sc);
+
+ ath_beacon_return(sc, avp);
+ sc->sc_flags &= ~SC_OP_BEACONS;
+
+ if (sc->nbcnvifs > 0) {
+ /* Re-enable beaconing */
+ sc->sc_ah->imask |= ATH9K_INT_SWBA;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
+ ath9k_ps_restore(sc);
+ }
+}
+
+static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath9k_vif_iter_data *iter_data = data;
+ int i;
+
+ if (iter_data->hw_macaddr)
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &=
+ ~(iter_data->hw_macaddr[i] ^ mac[i]);
switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- ic_opmode = NL80211_IFTYPE_STATION;
+ case NL80211_IFTYPE_AP:
+ iter_data->naps++;
break;
- case NL80211_IFTYPE_WDS:
- ic_opmode = NL80211_IFTYPE_WDS;
+ case NL80211_IFTYPE_STATION:
+ iter_data->nstations++;
break;
case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_AP:
+ iter_data->nadhocs++;
+ break;
case NL80211_IFTYPE_MESH_POINT:
- if (sc->nbcnvifs >= ATH_BCBUF) {
- ret = -ENOBUFS;
- goto out;
- }
- ic_opmode = vif->type;
+ iter_data->nmeshes++;
+ break;
+ case NL80211_IFTYPE_WDS:
+ iter_data->nwds++;
break;
default:
- ath_err(common, "Interface type %d not yet supported\n",
- vif->type);
- ret = -EOPNOTSUPP;
- goto out;
+ iter_data->nothers++;
+ break;
}
+}
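
The mask derivation above is worth a worked example: starting from an
all-ones mask, every active interface address clears exactly the mask
bits in which it differs from the hardware MAC, and the hardware then
ignores those bits when matching the receiver address of incoming
frames. A minimal standalone sketch, with plain byte arrays standing in
for the driver's structures:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void bssid_mask(const unsigned char *hw_mac,
		       const unsigned char vifs[][ETH_ALEN], int nvifs,
		       unsigned char *mask)
{
	int i, v;

	memset(mask, 0xff, ETH_ALEN);
	for (v = 0; v < nvifs; v++)
		for (i = 0; i < ETH_ALEN; i++)
			/* clear the bits where this vif differs from hw */
			mask[i] &= ~(hw_mac[i] ^ vifs[v][i]);
}

int main(void)
{
	const unsigned char hw[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x00, 0x00, 0x00 };
	const unsigned char vifs[2][ETH_ALEN] = {
		{ 0x00, 0x03, 0x7f, 0x00, 0x00, 0x01 },
		{ 0x00, 0x03, 0x7f, 0x00, 0x00, 0x02 },
	};
	unsigned char mask[ETH_ALEN];
	int i;

	bssid_mask(hw, vifs, 2, mask);
	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%c", mask[i], i == ETH_ALEN - 1 ? '\n' : ':');
	/* prints ff:ff:ff:ff:ff:fc - only the low two bits are wildcarded */
	return 0;
}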
- ath_dbg(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d\n", ic_opmode);
+/* Called with sc->mutex held. */
+void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ath9k_vif_iter_data *iter_data)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
- /* Set the VIF opmode */
- avp->av_opmode = ic_opmode;
- avp->av_bslot = -1;
+ /*
+ * Use the hardware MAC address as a reference; the hardware uses it
+ * together with the BSSID mask when matching addresses.
+ */
+ memset(iter_data, 0, sizeof(*iter_data));
+ iter_data->hw_macaddr = common->macaddr;
+ memset(&iter_data->mask, 0xff, ETH_ALEN);
- sc->nvifs++;
+ if (vif)
+ ath9k_vif_iter(iter_data, vif->addr, vif);
- ath9k_set_bssid_mask(hw, vif);
+ /* Get list of all active MAC addresses */
+ ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
+ iter_data);
+}
- if (sc->nvifs > 1)
- goto out; /* skip global settings for secondary vif */
+/* Called with sc->mutex held. */
+static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_vif_iter_data iter_data;
+
+ ath9k_calculate_iter_data(hw, vif, &iter_data);
+
+ ath9k_ps_wakeup(sc);
+ /* Set BSSID mask. */
+ memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+ ath_hw_setbssidmask(common);
- if (ic_opmode == NL80211_IFTYPE_AP) {
+ /* Set op-mode & TSF */
+ if (iter_data.naps > 0) {
ath9k_hw_set_tsfadjust(ah, 1);
sc->sc_flags |= SC_OP_TSF_RESET;
- }
+ ah->opmode = NL80211_IFTYPE_AP;
+ } else {
+ ath9k_hw_set_tsfadjust(ah, 0);
+ sc->sc_flags &= ~SC_OP_TSF_RESET;
- /* Set the device opmode */
- ah->opmode = ic_opmode;
+ if (iter_data.nwds + iter_data.nmeshes)
+ ah->opmode = NL80211_IFTYPE_AP;
+ else if (iter_data.nadhocs)
+ ah->opmode = NL80211_IFTYPE_ADHOC;
+ else
+ ah->opmode = NL80211_IFTYPE_STATION;
+ }
/*
* Enable MIB interrupts when there are hardware phy counters.
- * Note we only do this (at the moment) for station mode.
*/
- if ((vif->type == NL80211_IFTYPE_STATION) ||
- (vif->type == NL80211_IFTYPE_ADHOC) ||
- (vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
if (ah->config.enable_ani)
ah->imask |= ATH9K_INT_MIB;
ah->imask |= ATH9K_INT_TSFOOR;
+ } else {
+ ah->imask &= ~ATH9K_INT_MIB;
+ ah->imask &= ~ATH9K_INT_TSFOOR;
}
ath9k_hw_set_interrupts(ah, ah->imask);
+ ath9k_ps_restore(sc);
- if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC) {
+ /* Set up ANI */
+ if ((iter_data.naps + iter_data.nadhocs) > 0) {
sc->sc_flags |= SC_OP_ANI_RUN;
ath_start_ani(common);
+ } else {
+ sc->sc_flags &= ~SC_OP_ANI_RUN;
+ del_timer_sync(&common->ani.timer);
}
+}
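
The opmode chosen above follows a strict precedence: any AP interface
wins outright, WDS and mesh interfaces also run the hardware in AP
opmode, ad-hoc comes next, and station is the fallback. A compact
sketch of just that decision, assuming the counters come from a
filled-in ath9k_vif_iter_data (the OP_* enum here is illustrative, not
the driver's):

enum opmode { OP_STATION, OP_ADHOC, OP_AP };

static enum opmode pick_opmode(int naps, int nwds, int nmeshes, int nadhocs)
{
	if (naps > 0)
		return OP_AP;		/* any AP vif wins outright */
	if (nwds + nmeshes > 0)
		return OP_AP;		/* WDS/mesh also run with AP opmode */
	if (nadhocs > 0)
		return OP_ADHOC;
	return OP_STATION;		/* station-only (or empty) default */
}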
-out:
- mutex_unlock(&sc->mutex);
- return ret;
+/* Called with sc->mutex held, vif counts set up properly. */
+static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = hw->priv;
+
+ ath9k_calculate_summary_state(hw, vif);
+
+ if (ath9k_uses_beacons(vif->type)) {
+ int error;
+ ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+ /* This may fail because the upper layers do not have beacons
+ * properly configured yet. That's OK; we assume they will be
+ * configured later, at which point we will be notified via the
+ * bss_info_changed callback and can set up beacons properly
+ * there.
+ */
+ error = ath_beacon_alloc(sc, vif);
+ if (!error)
+ ath_beacon_config(sc, vif);
+ }
}
-static void ath9k_reclaim_beacon(struct ath_softc *sc,
- struct ieee80211_vif *vif)
+
+static int ath9k_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ int ret = 0;
- /* Disable SWBA interrupt */
- sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
- ath9k_ps_wakeup(sc);
- ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
- ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- tasklet_kill(&sc->bcon_tasklet);
- ath9k_ps_restore(sc);
+ mutex_lock(&sc->mutex);
- ath_beacon_return(sc, avp);
- sc->sc_flags &= ~SC_OP_BEACONS;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+ default:
+ ath_err(common, "Interface type %d not yet supported\n",
+ vif->type);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
- if (sc->nbcnvifs > 0) {
- /* Re-enable beaconing */
- sc->sc_ah->imask |= ATH9K_INT_SWBA;
- ath9k_ps_wakeup(sc);
- ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
- ath9k_ps_restore(sc);
+ if (ath9k_uses_beacons(vif->type)) {
+ if (sc->nbcnvifs >= ATH_BCBUF) {
+ ath_err(common, "Not enough beacon buffers when adding"
+ " new interface of type: %i\n",
+ vif->type);
+ ret = -ENOBUFS;
+ goto out;
+ }
+ }
+
+ if ((vif->type == NL80211_IFTYPE_ADHOC) &&
+ sc->nvifs > 0) {
+ ath_err(common, "Cannot create ADHOC interface when other"
+ " interfaces already exist.\n");
+ ret = -EINVAL;
+ goto out;
}
+
+ ath_dbg(common, ATH_DBG_CONFIG,
+ "Attach a VIF of type: %d\n", vif->type);
+
+ /* Set the VIF opmode */
+ avp->av_opmode = vif->type;
+ avp->av_bslot = -1;
+
+ sc->nvifs++;
+
+ ath9k_do_vif_add_setup(hw, vif);
+out:
+ mutex_unlock(&sc->mutex);
+ return ret;
}
static int ath9k_change_interface(struct ieee80211_hw *hw,
@@ -1465,40 +1513,40 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
enum nl80211_iftype new_type,
bool p2p)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
- switch (new_type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_ADHOC:
+ /* See if new interface type is valid. */
+ if ((new_type == NL80211_IFTYPE_ADHOC) &&
+ (sc->nvifs > 1)) {
+ ath_err(common, "When using ADHOC, it must be the only"
+ " interface.\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ath9k_uses_beacons(new_type) &&
+ !ath9k_uses_beacons(vif->type)) {
if (sc->nbcnvifs >= ATH_BCBUF) {
ath_err(common, "No beacon slot available\n");
ret = -ENOBUFS;
goto out;
}
- break;
- case NL80211_IFTYPE_STATION:
- /* Stop ANI */
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
- if ((vif->type == NL80211_IFTYPE_AP) ||
- (vif->type == NL80211_IFTYPE_ADHOC))
- ath9k_reclaim_beacon(sc, vif);
- break;
- default:
- ath_err(common, "Interface type %d not yet supported\n",
- vif->type);
- ret = -ENOTSUPP;
- goto out;
}
+
+ /* Clean up old vif stuff */
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_reclaim_beacon(sc, vif);
+
+ /* Add new settings */
vif->type = new_type;
vif->p2p = p2p;
+ ath9k_do_vif_add_setup(hw, vif);
out:
mutex_unlock(&sc->mutex);
return ret;
@@ -1507,25 +1555,20 @@ out:
static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
mutex_lock(&sc->mutex);
- /* Stop ANI */
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
+ sc->nvifs--;
/* Reclaim beacon resources */
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
- (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT))
+ if (ath9k_uses_beacons(vif->type))
ath9k_reclaim_beacon(sc, vif);
- sc->nvifs--;
+ ath9k_calculate_summary_state(hw, NULL);
mutex_unlock(&sc->mutex);
}
@@ -1566,12 +1609,11 @@ static void ath9k_disable_ps(struct ath_softc *sc)
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &hw->conf;
- bool disable_radio;
+ bool disable_radio = false;
mutex_lock(&sc->mutex);
@@ -1582,29 +1624,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
* the end.
*/
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- bool enable_radio;
- bool all_wiphys_idle;
- bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-
- spin_lock_bh(&sc->wiphy_lock);
- all_wiphys_idle = ath9k_all_wiphys_idle(sc);
- ath9k_set_wiphy_idle(aphy, idle);
-
- enable_radio = (!idle && all_wiphys_idle);
-
- /*
- * After we unlock here its possible another wiphy
- * can be re-renabled so to account for that we will
- * only disable the radio toward the end of this routine
- * if by then all wiphys are still idle.
- */
- spin_unlock_bh(&sc->wiphy_lock);
-
- if (enable_radio) {
- sc->ps_idle = false;
+ sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+ if (!sc->ps_idle) {
ath_radio_enable(sc, hw);
ath_dbg(common, ATH_DBG_CONFIG,
"not-idle: enabling radio\n");
+ } else {
+ disable_radio = true;
}
}
@@ -1645,29 +1671,16 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ah->curchan)
old_pos = ah->curchan - &ah->channels[0];
- aphy->chan_idx = pos;
- aphy->chan_is_ht = conf_is_ht(conf);
if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
sc->sc_flags |= SC_OP_OFFCHANNEL;
else
sc->sc_flags &= ~SC_OP_OFFCHANNEL;
- if (aphy->state == ATH_WIPHY_SCAN ||
- aphy->state == ATH_WIPHY_ACTIVE)
- ath9k_wiphy_pause_all_forced(sc, aphy);
- else {
- /*
- * Do not change operational channel based on a paused
- * wiphy changes.
- */
- goto skip_chan_change;
- }
-
ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
- /* XXX: remove me eventualy */
- ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
+ ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
+ curchan, conf->channel_type);
/* update survey stats for the old channel before switching */
spin_lock_irqsave(&common->cc_lock, flags);
@@ -1709,21 +1722,16 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
ath_update_survey_nf(sc, old_pos);
}
-skip_chan_change:
if (changed & IEEE80211_CONF_CHANGE_POWER) {
sc->config.txpowlimit = 2 * conf->power_level;
ath9k_ps_wakeup(sc);
- ath_update_txpow(sc);
+ ath9k_cmn_update_txpow(ah, sc->curtxpow,
+ sc->config.txpowlimit, &sc->curtxpow);
ath9k_ps_restore(sc);
}
- spin_lock_bh(&sc->wiphy_lock);
- disable_radio = ath9k_all_wiphys_idle(sc);
- spin_unlock_bh(&sc->wiphy_lock);
-
if (disable_radio) {
ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
- sc->ps_idle = true;
ath_radio_disable(sc, hw);
}
@@ -1748,8 +1756,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
unsigned int *total_flags,
u64 multicast)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
u32 rfilt;
changed_flags &= SUPPORTED_FILTERS;
@@ -1769,8 +1776,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
ath_node_attach(sc, sta);
@@ -1781,8 +1787,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
ath_node_detach(sc, sta);
@@ -1792,8 +1797,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw,
static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_txq *txq;
struct ath9k_tx_queue_info qi;
@@ -1837,8 +1841,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
@@ -1882,8 +1885,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
@@ -1913,7 +1915,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_BEACON) ||
((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- error = ath_beacon_alloc(aphy, vif);
+ error = ath_beacon_alloc(sc, vif);
if (!error)
ath_beacon_config(sc, vif);
}
@@ -1950,7 +1952,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP) {
sc->sc_flags |= SC_OP_TSF_RESET;
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
- error = ath_beacon_alloc(aphy, vif);
+ error = ath_beacon_alloc(sc, vif);
if (!error)
ath_beacon_config(sc, vif);
} else {
@@ -1988,9 +1990,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
{
+ struct ath_softc *sc = hw->priv;
u64 tsf;
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
@@ -2003,8 +2004,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
@@ -2015,8 +2015,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
static void ath9k_reset_tsf(struct ieee80211_hw *hw)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
mutex_lock(&sc->mutex);
@@ -2031,10 +2030,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+ u16 tid, u16 *ssn, u8 buf_size)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
int ret = 0;
local_bh_disable();
@@ -2079,8 +2077,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
@@ -2114,47 +2111,9 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
-static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
-
- mutex_lock(&sc->mutex);
- if (ath9k_wiphy_scanning(sc)) {
- /*
- * There is a race here in mac80211 but fixing it requires
- * we revisit how we handle the scan complete callback.
- * After mac80211 fixes we will not have configured hardware
- * to the home channel nor would we have configured the RX
- * filter yet.
- */
- mutex_unlock(&sc->mutex);
- return;
- }
-
- aphy->state = ATH_WIPHY_SCAN;
- ath9k_wiphy_pause_all_forced(sc, aphy);
- mutex_unlock(&sc->mutex);
-}
-
-/*
- * XXX: this requires a revisit after the driver
- * scan_complete gets moved to another place/removed in mac80211.
- */
-static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
-
- mutex_lock(&sc->mutex);
- aphy->state = ATH_WIPHY_ACTIVE;
- mutex_unlock(&sc->mutex);
-}
-
static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
mutex_lock(&sc->mutex);
@@ -2182,8 +2141,6 @@ struct ieee80211_ops ath9k_ops = {
.reset_tsf = ath9k_reset_tsf,
.ampdu_action = ath9k_ampdu_action,
.get_survey = ath9k_get_survey,
- .sw_scan_start = ath9k_sw_scan_start,
- .sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
.set_coverage_class = ath9k_set_coverage_class,
};
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 78ef1f1..e83128c 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -126,7 +126,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
void __iomem *mem;
- struct ath_wiphy *aphy;
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
@@ -198,8 +197,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_iomap;
}
- hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
- sizeof(struct ath_softc), &ath9k_ops);
+ hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
ret = -ENOMEM;
@@ -209,11 +207,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
SET_IEEE80211_DEV(hw, &pdev->dev);
pci_set_drvdata(pdev, hw);
- aphy = hw->priv;
- sc = (struct ath_softc *) (aphy + 1);
- aphy->sc = sc;
- aphy->hw = hw;
- sc->pri_wiphy = aphy;
+ sc = hw->priv;
sc->hw = hw;
sc->dev = &pdev->dev;
sc->mem = mem;
@@ -260,8 +254,7 @@ err_dma:
static void ath_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
void __iomem *mem = sc->mem;
if (!is_ath9k_unloaded)
@@ -281,8 +274,7 @@ static int ath_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
@@ -293,8 +285,7 @@ static int ath_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
u32 val;
/*
@@ -320,7 +311,6 @@ static int ath_pci_resume(struct device *device)
ath9k_ps_restore(sc);
sc->ps_idle = true;
- ath9k_set_wiphy_idle(aphy, true);
ath_radio_disable(sc, hw);
return 0;
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index e451478..960d717 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1560,8 +1560,7 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
{
- struct ath_wiphy *aphy = hw->priv;
- return aphy->sc;
+ return hw->priv;
}
static void ath_rate_free(void *priv)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index b2497b8..daf171d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -34,27 +34,6 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}
-static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
- struct ieee80211_hdr *hdr)
-{
- struct ieee80211_hw *hw = sc->pri_wiphy->hw;
- int i;
-
- spin_lock_bh(&sc->wiphy_lock);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr)
- == 0) {
- hw = aphy->hw;
- break;
- }
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return hw;
-}
-
/*
* Setup and link descriptors.
*
@@ -230,11 +209,6 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
int error = 0, i;
u32 size;
-
- common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
- ah->caps.rx_status_len,
- min(common->cachelsz, (u16)64));
-
ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
ah->caps.rx_status_len);
@@ -321,12 +295,12 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_lock_init(&sc->rx.rxbuflock);
+ common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
+ sc->sc_ah->caps.rx_status_len;
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
return ath_rx_edma_init(sc, nbufs);
} else {
- common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
- min(common->cachelsz, (u16)64));
-
ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
common->cachelsz, common->rx_bufsize);
@@ -463,8 +437,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
if (conf_is_ht(&sc->hw->conf))
rfilt |= ATH9K_RX_FILTER_COMP_BAR;
- if (sc->sec_wiphy || (sc->nvifs > 1) ||
- (sc->rx.rxfilter & FIF_OTHER_BSS)) {
+ if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
/* The following may also be needed for other older chips */
if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160)
rfilt |= ATH9K_RX_FILTER_PROM;
@@ -588,8 +561,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
return;
mgmt = (struct ieee80211_mgmt *)skb->data;
- if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
+ if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) {
+ /* TODO: This doesn't work well if stations are
+ * associated with two different APs, because curbssid
+ * only holds the BSSID of the last AP that any station
+ * associated with.
+ */
return; /* not from our current AP */
+ }
sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
@@ -662,37 +641,6 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
}
}
-static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
- struct ath_softc *sc, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr;
-
- hdr = (struct ieee80211_hdr *)skb->data;
-
- /* Send the frame to mac80211 */
- if (is_multicast_ether_addr(hdr->addr1)) {
- int i;
- /*
- * Deliver broadcast/multicast frames to all suitable
- * virtual wiphys.
- */
- /* TODO: filter based on channel configuration */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- struct sk_buff *nskb;
- if (aphy == NULL)
- continue;
- nskb = skb_copy(skb, GFP_ATOMIC);
- if (!nskb)
- continue;
- ieee80211_rx(aphy->hw, nskb);
- }
- ieee80211_rx(sc->hw, skb);
- } else
- /* Deliver unicast frames based on receiver address */
- ieee80211_rx(hw, skb);
-}
-
static bool ath_edma_get_buffers(struct ath_softc *sc,
enum ath9k_rx_qtype qtype)
{
@@ -862,15 +810,9 @@ static bool ath9k_rx_accept(struct ath_common *common,
if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
return false;
- /*
- * rs_more indicates chained descriptors which can be used
- * to link buffers together for a sort of scatter-gather
- * operation.
- * reject the frame, we don't support scatter-gather yet and
- * the frame is probably corrupt anyway
- */
+ /* Only use error bits from the last fragment */
if (rx_stats->rs_more)
- return false;
+ return true;
/*
* The rx_stats->rs_status will not be set until the end of the
@@ -974,7 +916,7 @@ static void ath9k_process_rssi(struct ath_common *common,
struct ieee80211_hdr *hdr,
struct ath_rx_status *rx_stats)
{
- struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
__le16 fc;
@@ -984,13 +926,19 @@ static void ath9k_process_rssi(struct ath_common *common,
fc = hdr->frame_control;
if (!ieee80211_is_beacon(fc) ||
- compare_ether_addr(hdr->addr3, common->curbssid))
+ compare_ether_addr(hdr->addr3, common->curbssid)) {
+ /* TODO: This doesn't work well if stations are
+ * associated with two different APs, because curbssid
+ * only holds the BSSID of the last AP that any station
+ * associated with.
+ */
return;
+ }
if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
- ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi);
+ ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
- last_rssi = aphy->last_rssi;
+ last_rssi = sc->last_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
ATH_RSSI_EP_MULTIPLIER);
@@ -1022,6 +970,10 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
return -EINVAL;
+ /* Only use status info from the last fragment */
+ if (rx_stats->rs_more)
+ return 0;
+
ath9k_process_rssi(common, hw, hdr, rx_stats);
if (ath9k_process_rate(common, hw, rx_stats, rx_status))
@@ -1623,7 +1575,7 @@ div_comb_done:
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
struct ath_buf *bf;
- struct sk_buff *skb = NULL, *requeue_skb;
+ struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -1632,7 +1584,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* virtual wiphy so to account for that we iterate over the active
* wiphys and find the appropriate wiphy and therefore hw.
*/
- struct ieee80211_hw *hw = NULL;
+ struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
int retval;
bool decrypt_error = false;
@@ -1674,10 +1626,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (!skb)
continue;
- hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len);
- rxs = IEEE80211_SKB_RXCB(skb);
+ /*
+ * Take frame header from the first fragment and RX status from
+ * the last one.
+ */
+ if (sc->rx.frag)
+ hdr_skb = sc->rx.frag;
+ else
+ hdr_skb = skb;
- hw = ath_get_virt_hw(sc, hdr);
+ hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
+ rxs = IEEE80211_SKB_RXCB(hdr_skb);
ath_debug_stat_rx(sc, &rs);
@@ -1686,12 +1645,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* chain it back at the queue without processing it.
*/
if (flush)
- goto requeue;
+ goto requeue_drop_frag;
retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
rxs, &decrypt_error);
if (retval)
- goto requeue;
+ goto requeue_drop_frag;
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower &&
@@ -1711,7 +1670,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* skb and put it at the tail of the sc->rx.rxbuf list for
* processing. */
if (!requeue_skb)
- goto requeue;
+ goto requeue_drop_frag;
/* Unmap the frame */
dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1722,8 +1681,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (ah->caps.rx_status_len)
skb_pull(skb, ah->caps.rx_status_len);
- ath9k_rx_skb_postprocess(common, skb, &rs,
- rxs, decrypt_error);
+ if (!rs.rs_more)
+ ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
+ rxs, decrypt_error);
/* We will now give hardware our shiny new allocated skb */
bf->bf_mpdu = requeue_skb;
@@ -1736,10 +1696,42 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
bf->bf_mpdu = NULL;
bf->bf_buf_addr = 0;
ath_err(common, "dma_mapping_error() on RX\n");
- ath_rx_send_to_mac80211(hw, sc, skb);
+ ieee80211_rx(hw, skb);
break;
}
+ if (rs.rs_more) {
+ /*
+ * rs_more indicates chained descriptors which can be
+ * used to link buffers together for a sort of
+ * scatter-gather operation.
+ */
+ if (sc->rx.frag) {
+ /* too many fragments - cannot handle frame */
+ dev_kfree_skb_any(sc->rx.frag);
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+ sc->rx.frag = skb;
+ goto requeue;
+ }
+
+ if (sc->rx.frag) {
+ int space = skb->len - skb_tailroom(hdr_skb);
+
+ sc->rx.frag = NULL;
+
+ if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
+ dev_kfree_skb(skb);
+ goto requeue_drop_frag;
+ }
+
+ skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
+ skb->len);
+ dev_kfree_skb_any(skb);
+ skb = hdr_skb;
+ }
+
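
The reassembly above keeps the first fragment parked in sc->rx.frag,
then grows its tailroom with pskb_expand_head() and appends the second
fragment's data. A userspace analogue of that grow-and-append step,
with a toy buffer type standing in for struct sk_buff and realloc()
playing the role of pskb_expand_head():

#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *data;
	size_t len;		/* bytes currently used */
	size_t tailroom;	/* spare bytes after the data */
};

/* Append src to dst, growing dst first when its tailroom is too small. */
static int buf_append(struct buf *dst, const struct buf *src)
{
	if (src->len > dst->tailroom) {
		size_t extra = src->len - dst->tailroom;
		unsigned char *p;

		p = realloc(dst->data, dst->len + dst->tailroom + extra);
		if (!p)
			return -1;	/* caller drops both fragments */
		dst->data = p;
		dst->tailroom += extra;
	}
	memcpy(dst->data + dst->len, src->data, src->len);
	dst->len += src->len;
	dst->tailroom -= src->len;
	return 0;
}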
/*
* change the default rx antenna if rx diversity chooses the
* other antenna 3 times in a row.
@@ -1763,8 +1755,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
ath_ant_comb_scan(sc, &rs);
- ath_rx_send_to_mac80211(hw, sc, skb);
+ ieee80211_rx(hw, skb);
+requeue_drop_frag:
+ if (sc->rx.frag) {
+ dev_kfree_skb_any(sc->rx.frag);
+ sc->rx.frag = NULL;
+ }
requeue:
if (edma) {
list_add_tail(&bf->list, &sc->rx.rxbuf);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 4df5659..b262e98 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1083,6 +1083,17 @@ enum {
#define AR_ENT_OTP 0x40d8
#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
#define AR_ENT_OTP_MPSD 0x00800000
+#define AR_CH0_BB_DPLL2 0x16184
+#define AR_CH0_BB_DPLL3 0x16188
+#define AR_CH0_DDR_DPLL2 0x16244
+#define AR_CH0_DDR_DPLL3 0x16248
+#define AR_CH0_DPLL2_KD 0x03F80000
+#define AR_CH0_DPLL2_KD_S 19
+#define AR_CH0_DPLL2_KI 0x3C000000
+#define AR_CH0_DPLL2_KI_S 26
+#define AR_CH0_DPLL3_PHASE_SHIFT 0x3F800000
+#define AR_CH0_DPLL3_PHASE_SHIFT_S 23
+#define AR_PHY_CCA_NOM_VAL_2GHZ -118
#define AR_RTC_9300_PLL_DIV 0x000003ff
#define AR_RTC_9300_PLL_DIV_S 0
@@ -1129,6 +1140,12 @@ enum {
#define AR_RTC_PLL_CLKSEL 0x00000300
#define AR_RTC_PLL_CLKSEL_S 8
+#define PLL3 0x16188
+#define PLL3_DO_MEAS_MASK 0x40000000
+#define PLL4 0x1618c
+#define PLL4_MEAS_DONE 0x8
+#define SQSUM_DVC_MASK 0x007ffff8
+
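
These definitions follow the driver's usual mask/shift convention: the
bare name is a contiguous bit mask and the _S-suffixed name is the
shift of its least significant bit. A sketch of how a field is read
and written under that convention (the FIELD_GET/FIELD_SET helpers
here are illustrative, not the driver's):

#include <stdint.h>

#define FIELD_GET(reg, mask, shift)	(((reg) & (mask)) >> (shift))
#define FIELD_SET(reg, mask, shift, v) \
	(((reg) & ~(mask)) | (((uint32_t)(v) << (shift)) & (mask)))

/* e.g. extract KD (bits 25:19) from a cached AR_CH0_BB_DPLL2 value
 * and write a new value back:
 *
 *	kd  = FIELD_GET(val, AR_CH0_DPLL2_KD, AR_CH0_DPLL2_KD_S);
 *	val = FIELD_SET(val, AR_CH0_DPLL2_KD, AR_CH0_DPLL2_KD_S, 0x1f);
 */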
#define AR_RTC_RESET \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
#define AR_RTC_RESET_EN (0x00000001)
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
deleted file mode 100644
index 2dc7095..0000000
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ /dev/null
@@ -1,717 +0,0 @@
-/*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include "ath9k.h"
-
-struct ath9k_vif_iter_data {
- const u8 *hw_macaddr;
- u8 mask[ETH_ALEN];
-};
-
-static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath9k_vif_iter_data *iter_data = data;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
-}
-
-void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath9k_vif_iter_data iter_data;
- int i;
-
- /*
- * Use the hardware MAC address as reference, the hardware uses it
- * together with the BSSID mask when matching addresses.
- */
- iter_data.hw_macaddr = common->macaddr;
- memset(&iter_data.mask, 0xff, ETH_ALEN);
-
- if (vif)
- ath9k_vif_iter(&iter_data, vif->addr, vif);
-
- /* Get list of all active MAC addresses */
- spin_lock_bh(&sc->wiphy_lock);
- ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
- &iter_data);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] == NULL)
- continue;
- ieee80211_iterate_active_interfaces_atomic(
- sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
- }
- spin_unlock_bh(&sc->wiphy_lock);
-
- memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
- ath_hw_setbssidmask(common);
-}
-
-int ath9k_wiphy_add(struct ath_softc *sc)
-{
- int i, error;
- struct ath_wiphy *aphy;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ieee80211_hw *hw;
- u8 addr[ETH_ALEN];
-
- hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
- if (hw == NULL)
- return -ENOMEM;
-
- spin_lock_bh(&sc->wiphy_lock);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] == NULL)
- break;
- }
-
- if (i == sc->num_sec_wiphy) {
- /* No empty slot available; increase array length */
- struct ath_wiphy **n;
- n = krealloc(sc->sec_wiphy,
- (sc->num_sec_wiphy + 1) *
- sizeof(struct ath_wiphy *),
- GFP_ATOMIC);
- if (n == NULL) {
- spin_unlock_bh(&sc->wiphy_lock);
- ieee80211_free_hw(hw);
- return -ENOMEM;
- }
- n[i] = NULL;
- sc->sec_wiphy = n;
- sc->num_sec_wiphy++;
- }
-
- SET_IEEE80211_DEV(hw, sc->dev);
-
- aphy = hw->priv;
- aphy->sc = sc;
- aphy->hw = hw;
- sc->sec_wiphy[i] = aphy;
- aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
- spin_unlock_bh(&sc->wiphy_lock);
-
- memcpy(addr, common->macaddr, ETH_ALEN);
- addr[0] |= 0x02; /* Locally managed address */
- /*
- * XOR virtual wiphy index into the least significant bits to generate
- * a different MAC address for each virtual wiphy.
- */
- addr[5] ^= i & 0xff;
- addr[4] ^= (i & 0xff00) >> 8;
- addr[3] ^= (i & 0xff0000) >> 16;
-
- SET_IEEE80211_PERM_ADDR(hw, addr);
-
- ath9k_set_hw_capab(sc, hw);
-
- error = ieee80211_register_hw(hw);
-
- if (error == 0) {
- /* Make sure wiphy scheduler is started (if enabled) */
- ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
- }
-
- return error;
-}
-
-int ath9k_wiphy_del(struct ath_wiphy *aphy)
-{
- struct ath_softc *sc = aphy->sc;
- int i;
-
- spin_lock_bh(&sc->wiphy_lock);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (aphy == sc->sec_wiphy[i]) {
- sc->sec_wiphy[i] = NULL;
- spin_unlock_bh(&sc->wiphy_lock);
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- return 0;
- }
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return -ENOENT;
-}
-
-static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
- struct ieee80211_vif *vif, const u8 *bssid,
- int ps)
-{
- struct ath_softc *sc = aphy->sc;
- struct ath_tx_control txctl;
- struct sk_buff *skb;
- struct ieee80211_hdr *hdr;
- __le16 fc;
- struct ieee80211_tx_info *info;
-
- skb = dev_alloc_skb(24);
- if (skb == NULL)
- return -ENOMEM;
- hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
- memset(hdr, 0, 24);
- fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
- if (ps)
- fc |= cpu_to_le16(IEEE80211_FCTL_PM);
- hdr->frame_control = fc;
- memcpy(hdr->addr1, bssid, ETH_ALEN);
- memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr3, bssid, ETH_ALEN);
-
- info = IEEE80211_SKB_CB(skb);
- memset(info, 0, sizeof(*info));
- info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
- info->control.vif = vif;
- info->control.rates[0].idx = 0;
- info->control.rates[0].count = 4;
- info->control.rates[1].idx = -1;
-
- memset(&txctl, 0, sizeof(struct ath_tx_control));
- txctl.txq = sc->tx.txq_map[WME_AC_VO];
- txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
-
- if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
- goto exit;
-
- return 0;
-exit:
- dev_kfree_skb_any(skb);
- return -1;
-}
-
-static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
- return true;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
- return true;
- }
- return false;
-}
-
-static bool ath9k_wiphy_pausing(struct ath_softc *sc)
-{
- bool ret;
- spin_lock_bh(&sc->wiphy_lock);
- ret = __ath9k_wiphy_pausing(sc);
- spin_unlock_bh(&sc->wiphy_lock);
- return ret;
-}
-
-static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
- return true;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
- return true;
- }
- return false;
-}
-
-bool ath9k_wiphy_scanning(struct ath_softc *sc)
-{
- bool ret;
- spin_lock_bh(&sc->wiphy_lock);
- ret = __ath9k_wiphy_scanning(sc);
- spin_unlock_bh(&sc->wiphy_lock);
- return ret;
-}
-
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
-{
- if (aphy == NULL)
- return;
- if (aphy->chan_idx != aphy->sc->chan_idx)
- return; /* wiphy not on the selected channel */
- __ath9k_wiphy_unpause(aphy);
-}
-
-static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
-{
- int i;
- spin_lock_bh(&sc->wiphy_lock);
- __ath9k_wiphy_unpause_ch(sc->pri_wiphy);
- for (i = 0; i < sc->num_sec_wiphy; i++)
- __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
- spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_chan_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_wiphy *aphy = sc->next_wiphy;
-
- if (aphy == NULL)
- return;
-
- /*
- * All pending interfaces paused; ready to change
- * channels.
- */
-
- /* Change channels */
- mutex_lock(&sc->mutex);
- /* XXX: remove me eventually */
- ath9k_update_ichannel(sc, aphy->hw,
- &sc->sc_ah->channels[sc->chan_idx]);
-
- /* sync hw configuration for hw code */
- common->hw = aphy->hw;
-
- if (ath_set_channel(sc, aphy->hw,
- &sc->sc_ah->channels[sc->chan_idx]) < 0) {
- printk(KERN_DEBUG "ath9k: Failed to set channel for new "
- "virtual wiphy\n");
- mutex_unlock(&sc->mutex);
- return;
- }
- mutex_unlock(&sc->mutex);
-
- ath9k_wiphy_unpause_channel(sc);
-}
-
-/*
- * ath9k version of ieee80211_tx_status() for TX frames that are generated
- * internally in the driver.
- */
-void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-
- if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
- if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
- printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
- "frame\n", wiphy_name(hw->wiphy));
- /*
- * The AP did not reply; ignore this to allow us to
- * continue.
- */
- }
- aphy->state = ATH_WIPHY_PAUSED;
- if (!ath9k_wiphy_pausing(aphy->sc)) {
- /*
- * Drop from tasklet to work to allow mutex for channel
- * change.
- */
- ieee80211_queue_work(aphy->sc->hw,
- &aphy->sc->chan_work);
- }
- }
-
- dev_kfree_skb(skb);
-}
-
-static void ath9k_mark_paused(struct ath_wiphy *aphy)
-{
- struct ath_softc *sc = aphy->sc;
- aphy->state = ATH_WIPHY_PAUSED;
- if (!__ath9k_wiphy_pausing(sc))
- ieee80211_queue_work(sc->hw, &sc->chan_work);
-}
-
-static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath_wiphy *aphy = data;
- struct ath_vif *avp = (void *) vif->drv_priv;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- if (!vif->bss_conf.assoc) {
- ath9k_mark_paused(aphy);
- break;
- }
- /* TODO: could avoid this if already in PS mode */
- if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
- printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
- __func__);
- ath9k_mark_paused(aphy);
- }
- break;
- case NL80211_IFTYPE_AP:
- /* Beacon transmission is paused by aphy->state change */
- ath9k_mark_paused(aphy);
- break;
- default:
- break;
- }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
- ieee80211_stop_queues(aphy->hw);
- aphy->state = ATH_WIPHY_PAUSING;
- /*
- * TODO: handle PAUSING->PAUSED for the case where there are multiple
- * active vifs (now we do it on the first vif getting ready; should be
- * on the last)
- */
- ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
- aphy);
- return 0;
-}
-
-int ath9k_wiphy_pause(struct ath_wiphy *aphy)
-{
- int ret;
- spin_lock_bh(&aphy->sc->wiphy_lock);
- ret = __ath9k_wiphy_pause(aphy);
- spin_unlock_bh(&aphy->sc->wiphy_lock);
- return ret;
-}
-
-static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
- struct ath_wiphy *aphy = data;
- struct ath_vif *avp = (void *) vif->drv_priv;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- if (!vif->bss_conf.assoc)
- break;
- ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
- break;
- case NL80211_IFTYPE_AP:
- /* Beacon transmission is re-enabled by aphy->state change */
- break;
- default:
- break;
- }
-}
-
-/* caller must hold wiphy_lock */
-static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
- ieee80211_iterate_active_interfaces_atomic(aphy->hw,
- ath9k_unpause_iter, aphy);
- aphy->state = ATH_WIPHY_ACTIVE;
- ieee80211_wake_queues(aphy->hw);
- return 0;
-}
-
-int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
-{
- int ret;
- spin_lock_bh(&aphy->sc->wiphy_lock);
- ret = __ath9k_wiphy_unpause(aphy);
- spin_unlock_bh(&aphy->sc->wiphy_lock);
- return ret;
-}
-
-static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
- sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
- sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
- }
-}
-
-/* caller must hold wiphy_lock */
-static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
-{
- int i;
- if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
- __ath9k_wiphy_pause(sc->pri_wiphy);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
- __ath9k_wiphy_pause(sc->sec_wiphy[i]);
- }
-}
-
-int ath9k_wiphy_select(struct ath_wiphy *aphy)
-{
- struct ath_softc *sc = aphy->sc;
- bool now;
-
- spin_lock_bh(&sc->wiphy_lock);
- if (__ath9k_wiphy_scanning(sc)) {
- /*
- * For now, we are using mac80211 sw scan and it expects to
- * have full control over channel changes, so avoid wiphy
- * scheduling during a scan. This could be optimized if the
- * scanning control were moved into the driver.
- */
- spin_unlock_bh(&sc->wiphy_lock);
- return -EBUSY;
- }
- if (__ath9k_wiphy_pausing(sc)) {
- if (sc->wiphy_select_failures == 0)
- sc->wiphy_select_first_fail = jiffies;
- sc->wiphy_select_failures++;
- if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
- {
- printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
- "out; disable/enable hw to recover\n");
- __ath9k_wiphy_mark_all_paused(sc);
- /*
- * TODO: this workaround to fix hardware is unlikely to
- * be specific to virtual wiphy changes. It can happen
- * on normal channel change, too, and as such, this
- * should really be made more generic. For example,
- * tricker radio disable/enable on GTT interrupt burst
- * (say, 10 GTT interrupts received without any TX
- * frame being completed)
- */
- spin_unlock_bh(&sc->wiphy_lock);
- ath_radio_disable(sc, aphy->hw);
- ath_radio_enable(sc, aphy->hw);
- /* Only the primary wiphy hw is used for queuing work */
- ieee80211_queue_work(aphy->sc->hw,
- &aphy->sc->chan_work);
- return -EBUSY; /* previous select still in progress */
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return -EBUSY; /* previous select still in progress */
- }
- sc->wiphy_select_failures = 0;
-
- /* Store the new channel */
- sc->chan_idx = aphy->chan_idx;
- sc->chan_is_ht = aphy->chan_is_ht;
- sc->next_wiphy = aphy;
-
- __ath9k_wiphy_pause_all(sc);
- now = !__ath9k_wiphy_pausing(aphy->sc);
- spin_unlock_bh(&sc->wiphy_lock);
-
- if (now) {
- /* Ready to request channel change immediately */
- ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
- }
-
- /*
- * wiphys will be unpaused in ath9k_tx_status() once channel has been
- * changed if any wiphy needs time to become paused.
- */
-
- return 0;
-}
-
-bool ath9k_wiphy_started(struct ath_softc *sc)
-{
- int i;
- spin_lock_bh(&sc->wiphy_lock);
- if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
- spin_unlock_bh(&sc->wiphy_lock);
- return true;
- }
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
- spin_unlock_bh(&sc->wiphy_lock);
- return true;
- }
- }
- spin_unlock_bh(&sc->wiphy_lock);
- return false;
-}
-
-static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
- struct ath_wiphy *selected)
-{
- if (selected->state == ATH_WIPHY_SCAN) {
- if (aphy == selected)
- return;
- /*
- * Pause all other wiphys for the duration of the scan even if
- * they are on the current channel now.
- */
- } else if (aphy->chan_idx == selected->chan_idx)
- return;
- aphy->state = ATH_WIPHY_PAUSED;
- ieee80211_stop_queues(aphy->hw);
-}
-
-void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
- struct ath_wiphy *selected)
-{
- int i;
- spin_lock_bh(&sc->wiphy_lock);
- if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
- ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- if (sc->sec_wiphy[i] &&
- sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
- ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
- }
- spin_unlock_bh(&sc->wiphy_lock);
-}
-
-void ath9k_wiphy_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- wiphy_work.work);
- struct ath_wiphy *aphy = NULL;
- bool first = true;
-
- spin_lock_bh(&sc->wiphy_lock);
-
- if (sc->wiphy_scheduler_int == 0) {
- /* wiphy scheduler is disabled */
- spin_unlock_bh(&sc->wiphy_lock);
- return;
- }
-
-try_again:
- sc->wiphy_scheduler_index++;
- while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
- aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
- if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
- break;
-
- sc->wiphy_scheduler_index++;
- aphy = NULL;
- }
- if (aphy == NULL) {
- sc->wiphy_scheduler_index = 0;
- if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
- if (first) {
- first = false;
- goto try_again;
- }
- /* No wiphy is ready to be scheduled */
- } else
- aphy = sc->pri_wiphy;
- }
-
- spin_unlock_bh(&sc->wiphy_lock);
-
- if (aphy &&
- aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
- ath9k_wiphy_select(aphy)) {
- printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
- "change\n");
- }
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->wiphy_work,
- sc->wiphy_scheduler_int);
-}
-
-void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
-{
- cancel_delayed_work_sync(&sc->wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
- if (sc->wiphy_scheduler_int)
- ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
- sc->wiphy_scheduler_int);
-}
-
-/* caller must hold wiphy_lock */
-bool ath9k_all_wiphys_idle(struct ath_softc *sc)
-{
- unsigned int i;
- if (!sc->pri_wiphy->idle)
- return false;
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (!aphy)
- continue;
- if (!aphy->idle)
- return false;
- }
- return true;
-}
-
-/* caller must hold wiphy_lock */
-void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
-{
- struct ath_softc *sc = aphy->sc;
-
- aphy->idle = idle;
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
- "Marking %s as %sidle\n",
- wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
-}
-/* Only bother starting a queue on an active virtual wiphy */
-bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
-{
- struct ieee80211_hw *hw = sc->pri_wiphy->hw;
- unsigned int i;
- bool txq_started = false;
-
- spin_lock_bh(&sc->wiphy_lock);
-
- /* Start the primary wiphy */
- if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
- ieee80211_wake_queue(hw, skb_queue);
- txq_started = true;
- goto unlock;
- }
-
- /* Now start the secondary wiphy queues */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (!aphy)
- continue;
- if (aphy->state != ATH_WIPHY_ACTIVE)
- continue;
-
- hw = aphy->hw;
- ieee80211_wake_queue(hw, skb_queue);
- txq_started = true;
- break;
- }
-
-unlock:
- spin_unlock_bh(&sc->wiphy_lock);
- return txq_started;
-}
-
-/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
-void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
-{
- struct ieee80211_hw *hw = sc->pri_wiphy->hw;
- unsigned int i;
-
- spin_lock_bh(&sc->wiphy_lock);
-
- /* Stop the primary wiphy */
- ieee80211_stop_queue(hw, skb_queue);
-
- /* Now stop the secondary wiphy queues */
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (!aphy)
- continue;
- hw = aphy->hw;
- ieee80211_stop_queue(hw, skb_queue);
- }
- spin_unlock_bh(&sc->wiphy_lock);
-}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 07b7804..9f4e755 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -19,7 +19,6 @@
#define BITS_PER_BYTE 8
#define OFDM_PLCP_BITS 22
-#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
#define L_STF 8
#define L_LTF 8
@@ -32,7 +31,6 @@
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
-#define OFDM_SIFS_TIME 16
static u16 bits_per_symbol[][2] = {
/* 20MHz 40MHz */
@@ -57,8 +55,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
- int nframes, int nbad, int txok, bool update_rc);
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int nframes, int nbad,
+ int txok, bool update_rc);
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
int seqno);
@@ -169,7 +168,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
ath_tx_update_baw(sc, tid, fi->seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
} else {
- ath_tx_send_normal(sc, txq, tid, &bf_head);
+ ath_tx_send_normal(sc, txq, NULL, &bf_head);
}
spin_lock_bh(&txq->axq_lock);
}
@@ -297,7 +296,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
ATH_TXBUF_RESET(tbf);
- tbf->aphy = bf->aphy;
tbf->bf_mpdu = bf->bf_mpdu;
tbf->bf_buf_addr = bf->bf_buf_addr;
memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
@@ -345,7 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_node *an = NULL;
struct sk_buff *skb;
struct ieee80211_sta *sta;
- struct ieee80211_hw *hw;
+ struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *tx_info;
struct ath_atx_tid *tid = NULL;
@@ -364,7 +362,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
hdr = (struct ieee80211_hdr *)skb->data;
tx_info = IEEE80211_SKB_CB(skb);
- hw = bf->aphy->hw;
memcpy(rates, tx_info->control.rates, sizeof(rates));
@@ -383,7 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_rc_status(bf, ts, 1, 1, 0, false);
+ ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
0, 0);
@@ -429,7 +426,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
while (bf) {
- txfail = txpending = 0;
+ txfail = txpending = sendbar = 0;
bf_next = bf->bf_next;
skb = bf->bf_mpdu;
@@ -489,10 +486,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
- ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
+ ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
rc_update = false;
} else {
- ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
+ ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
@@ -516,7 +513,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_state.bf_type |=
BUF_XRETRY;
- ath_tx_rc_status(bf, ts, nframes,
+ ath_tx_rc_status(sc, bf, ts, nframes,
nbad, 0, false);
ath_tx_complete_buf(sc, bf, txq,
&bf_head,
@@ -566,8 +563,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
rcu_read_unlock();
- if (needreset)
+ if (needreset) {
+ spin_unlock_bh(&sc->sc_pcu_lock);
ath_reset(sc, false);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ }
}
static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -856,7 +856,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
- *ssn = txtid->seq_start;
+ *ssn = txtid->seq_start = txtid->seq_next;
+
+ memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
+ txtid->baw_head = txtid->baw_tail = 0;
return 0;
}
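
The change above restarts the block-ack window cleanly at ADDBA setup:
the starting sequence number reported back in *ssn is pinned to the
next sequence the transmitter will actually use, and the in-flight
tracking state is wiped. A toy sketch of that reset (the struct here
is illustrative; the real fields live in struct ath_atx_tid):

#include <stdint.h>
#include <string.h>

struct toy_tid {
	uint16_t seq_start, seq_next;	/* window start / next seqno to use */
	int baw_head, baw_tail;		/* indices into the tracking bitmap */
	unsigned long tx_buf[8];	/* bitmap of in-flight window slots */
};

static uint16_t aggr_start(struct toy_tid *tid)
{
	/* restart the window at the next sequence number to be sent */
	tid->seq_start = tid->seq_next;
	memset(tid->tx_buf, 0, sizeof(tid->tx_buf));
	tid->baw_head = tid->baw_tail = 0;
	return tid->seq_start;		/* reported back as *ssn */
}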
@@ -942,7 +945,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
[WME_AC_VI] = ATH_TXQ_AC_VI,
[WME_AC_VO] = ATH_TXQ_AC_VO,
};
- int qnum, i;
+ int axq_qnum, i;
memset(&qi, 0, sizeof(qi));
qi.tqi_subtype = subtype_txq_to_hwq[subtype];
@@ -976,24 +979,25 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
TXQ_FLAG_TXDESCINT_ENABLE;
}
- qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
- if (qnum == -1) {
+ axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
+ if (axq_qnum == -1) {
/*
* NB: don't print a message, this happens
* normally on parts with too few tx queues
*/
return NULL;
}
- if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
+ if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
ath_err(common, "qnum %u out of range, max %zu!\n",
- qnum, ARRAY_SIZE(sc->tx.txq));
- ath9k_hw_releasetxqueue(ah, qnum);
+ axq_qnum, ARRAY_SIZE(sc->tx.txq));
+ ath9k_hw_releasetxqueue(ah, axq_qnum);
return NULL;
}
- if (!ATH_TXQ_SETUP(sc, qnum)) {
- struct ath_txq *txq = &sc->tx.txq[qnum];
+ if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
+ struct ath_txq *txq = &sc->tx.txq[axq_qnum];
- txq->axq_qnum = qnum;
+ txq->axq_qnum = axq_qnum;
+ txq->mac80211_qnum = -1;
txq->axq_link = NULL;
INIT_LIST_HEAD(&txq->axq_q);
INIT_LIST_HEAD(&txq->axq_acq);
@@ -1001,14 +1005,14 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_depth = 0;
txq->axq_ampdu_depth = 0;
txq->axq_tx_inprogress = false;
- sc->tx.txqsetup |= 1<<qnum;
+ sc->tx.txqsetup |= 1<<axq_qnum;
txq->txq_headidx = txq->txq_tailidx = 0;
for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
INIT_LIST_HEAD(&txq->txq_fifo[i]);
INIT_LIST_HEAD(&txq->txq_fifo_pending);
}
- return &sc->tx.txq[qnum];
+ return &sc->tx.txq[axq_qnum];
}
int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -1205,8 +1209,17 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
ath_err(common, "Failed to stop TX DMA!\n");
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i))
- ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
+ if (!ATH_TXQ_SETUP(sc, i))
+ continue;
+
+ /*
+ * The caller will resume queues with ieee80211_wake_queues.
+ * Mark the queue as not stopped to prevent ath_tx_complete
+ * from waking the queue too early.
+ */
+ txq = &sc->tx.txq[i];
+ txq->stopped = false;
+ ath_draintxq(sc, txq, retry_tx);
}
return !npend;
@@ -1218,46 +1231,59 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
}
+/* For each entry on axq_acq, and for each of its tids, try to schedule
+ * packets for transmit until txq->axq_ampdu_depth reaches
+ * ATH_AGGR_MIN_QDEPTH.
+ */
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
- struct ath_atx_ac *ac;
- struct ath_atx_tid *tid;
+ struct ath_atx_ac *ac, *ac_tmp, *last_ac;
+ struct ath_atx_tid *tid, *last_tid;
- if (list_empty(&txq->axq_acq))
+ if (list_empty(&txq->axq_acq) ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return;
ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
- list_del(&ac->list);
- ac->sched = false;
+ last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
- do {
- if (list_empty(&ac->tid_q))
- return;
+ list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+ last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
+ list_del(&ac->list);
+ ac->sched = false;
- tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
- list_del(&tid->list);
- tid->sched = false;
+ while (!list_empty(&ac->tid_q)) {
+ tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+ list);
+ list_del(&tid->list);
+ tid->sched = false;
- if (tid->paused)
- continue;
+ if (tid->paused)
+ continue;
- ath_tx_sched_aggr(sc, txq, tid);
+ ath_tx_sched_aggr(sc, txq, tid);
- /*
- * add tid to round-robin queue if more frames
- * are pending for the tid
- */
- if (!list_empty(&tid->buf_q))
- ath_tx_queue_tid(txq, tid);
+ /*
+ * add tid to round-robin queue if more frames
+ * are pending for the tid
+ */
+ if (!list_empty(&tid->buf_q))
+ ath_tx_queue_tid(txq, tid);
- break;
- } while (!list_empty(&ac->tid_q));
+ if (tid == last_tid ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ break;
+ }
- if (!list_empty(&ac->tid_q)) {
- if (!ac->sched) {
- ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
+ if (!list_empty(&ac->tid_q)) {
+ if (!ac->sched) {
+ ac->sched = true;
+ list_add_tail(&ac->list, &txq->axq_acq);
+ }
}
+
+ if (ac == last_ac ||
+ txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ return;
}
}
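The rewritten scheduler no longer services one TID per call: it round-robins across every AC queued on the txq and every TID within each AC, bailing out once axq_ampdu_depth reaches ATH_AGGR_MIN_QDEPTH so the hardware stays fed without over-queueing. A compact model of the control flow (arrays replace the driver's linked lists):

#define AGGR_MIN_QDEPTH 2	/* stand-in for ATH_AGGR_MIN_QDEPTH */

struct sched_model {
	int ampdu_depth;	/* aggregates queued to hardware */
	int nac;		/* ACs with pending TIDs */
	int tids_per_ac[4];
};

static void txq_schedule(struct sched_model *q)
{
	int ac, tid;

	if (q->ampdu_depth >= AGGR_MIN_QDEPTH)
		return;

	for (ac = 0; ac < q->nac; ac++) {
		for (tid = 0; tid < q->tids_per_ac[ac]; tid++) {
			q->ampdu_depth++;	/* ath_tx_sched_aggr() */
			if (q->ampdu_depth >= AGGR_MIN_QDEPTH)
				return;
		}
	}
}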
@@ -1301,6 +1327,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
+ TX_STAT_INC(txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
@@ -1308,6 +1335,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
list_splice_tail_init(head, &txq->axq_q);
if (txq->axq_link == NULL) {
+ TX_STAT_INC(txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr),
@@ -1321,6 +1349,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
&txq->axq_link);
+ TX_STAT_INC(txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
txq->axq_depth++;
@@ -1335,7 +1364,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
struct list_head bf_head;
bf->bf_state.bf_type |= BUF_AMPDU;
- TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
/*
* Do not queue to h/w when any of the following conditions is true:
@@ -1351,6 +1379,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
* Add this frame to software queue for scheduling later
* for aggregation.
*/
+ TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
list_add_tail(&bf->list, &tid->buf_q);
ath_tx_queue_tid(txctl->txq, tid);
return;
@@ -1364,6 +1393,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
ath_tx_addto_baw(sc, tid, fi->seqno);
/* Queue to h/w without aggregation */
+ TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
bf->bf_lastbf = bf;
ath_buf_set_rate(sc, bf, fi->framelen);
ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
@@ -1416,8 +1446,7 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
int framelen)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = tx_info->control.sta;
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
@@ -1635,8 +1664,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
struct ath_txq *txq,
struct sk_buff *skb)
{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
@@ -1652,7 +1680,6 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
ATH_TXBUF_RESET(bf);
- bf->aphy = aphy;
bf->bf_flags = setup_tx_flags(skb);
bf->bf_mpdu = skb;
@@ -1741,8 +1768,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->control.sta;
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
+ struct ath_softc *sc = hw->priv;
struct ath_txq *txq = txctl->txq;
struct ath_buf *bf;
int padpos, padsize;
@@ -1794,7 +1820,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
spin_lock_bh(&txq->axq_lock);
if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
- ath_mac80211_stop_queue(sc, q);
+ ieee80211_stop_queue(sc->hw, q);
txq->stopped = 1;
}
spin_unlock_bh(&txq->axq_lock);
@@ -1809,8 +1835,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
/*****************/
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
- struct ath_wiphy *aphy, int tx_flags, int ftype,
- struct ath_txq *txq)
+ int tx_flags, int ftype, struct ath_txq *txq)
{
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1820,9 +1845,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
- if (aphy)
- hw = aphy->hw;
-
if (tx_flags & ATH_TX_BAR)
tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
@@ -1852,19 +1874,20 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
PS_WAIT_FOR_TX_ACK));
}
- if (unlikely(ftype))
- ath9k_tx_status(hw, skb, ftype);
- else {
- q = skb_get_queue_mapping(skb);
- if (txq == sc->tx.txq_map[q]) {
- spin_lock_bh(&txq->axq_lock);
- if (WARN_ON(--txq->pending_frames < 0))
- txq->pending_frames = 0;
- spin_unlock_bh(&txq->axq_lock);
- }
+ q = skb_get_queue_mapping(skb);
+ if (txq == sc->tx.txq_map[q]) {
+ spin_lock_bh(&txq->axq_lock);
+ if (WARN_ON(--txq->pending_frames < 0))
+ txq->pending_frames = 0;
- ieee80211_tx_status(hw, skb);
+ if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
+ ieee80211_wake_queue(sc->hw, q);
+ txq->stopped = 0;
+ }
+ spin_unlock_bh(&txq->axq_lock);
}
+
+ ieee80211_tx_status(hw, skb);
}
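With ath_wake_mac80211_queue() removed (see further below), the wake-up check now lives directly in ath_tx_complete(): decrement pending_frames under the queue lock and, if the queue had been stopped and has drained below the threshold, wake it. A reduced model of that flow control:

#define MAX_QDEPTH 123	/* arbitrary stand-in for ATH_MAX_QDEPTH */

struct flowctl_model {
	int pending_frames;
	int stopped;
};

static void tx_complete(struct flowctl_model *q, void (*wake_queue)(void))
{
	if (--q->pending_frames < 0)	/* WARN_ON() in the driver */
		q->pending_frames = 0;

	if (q->stopped && q->pending_frames < MAX_QDEPTH) {
		wake_queue();		/* ieee80211_wake_queue() */
		q->stopped = 0;
	}
}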
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1896,8 +1919,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
else
complete(&sc->paprd_complete);
} else {
- ath_debug_stat_tx(sc, bf, ts);
- ath_tx_complete(sc, skb, bf->aphy, tx_flags,
+ ath_debug_stat_tx(sc, bf, ts, txq);
+ ath_tx_complete(sc, skb, tx_flags,
bf->bf_state.bfs_ftype, txq);
}
/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
@@ -1913,14 +1936,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
}
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
- int nframes, int nbad, int txok, bool update_rc)
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+ struct ath_tx_status *ts, int nframes, int nbad,
+ int txok, bool update_rc)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hw *hw = bf->aphy->hw;
- struct ath_softc *sc = bf->aphy->sc;
+ struct ieee80211_hw *hw = sc->hw;
struct ath_hw *ah = sc->sc_ah;
u8 i, tx_rateindex;
@@ -1971,19 +1994,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
}
-static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
-{
- struct ath_txq *txq;
-
- txq = sc->tx.txq_map[qnum];
- spin_lock_bh(&txq->axq_lock);
- if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
- if (ath_mac80211_start_queue(sc, qnum))
- txq->stopped = 0;
- }
- spin_unlock_bh(&txq->axq_lock);
-}
-
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1994,7 +2004,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_tx_status ts;
int txok;
int status;
- int qnum;
ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -2004,6 +2013,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_lock_bh(&txq->axq_lock);
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
+ if (sc->sc_flags & SC_OP_TXAGGR)
+ ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
break;
}
@@ -2038,6 +2049,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
spin_unlock_bh(&txq->axq_lock);
break;
}
+ TX_STAT_INC(txq->axq_qnum, txprocdesc);
/*
* Remove ath_buf's of the same transmit unit from txq,
@@ -2070,27 +2082,45 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
*/
if (ts.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
+ ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
}
- qnum = skb_get_queue_mapping(bf->bf_mpdu);
-
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
true);
else
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
- if (txq == sc->tx.txq_map[qnum])
- ath_wake_mac80211_queue(sc, qnum);
-
spin_lock_bh(&txq->axq_lock);
+
if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
spin_unlock_bh(&txq->axq_lock);
}
}
+static void ath_hw_pll_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ hw_pll_work.work);
+ static int count;
+
+ if (AR_SREV_9485(sc->sc_ah)) {
+ if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) {
+ count++;
+
+ if (count == 3) {
+ /* Rx is hung for more than 500ms. Reset it */
+ ath_reset(sc, true);
+ count = 0;
+ }
+ } else
+ count = 0;
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
+ }
+}
+
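Timing of the PLL watchdog above: the work item re-arms itself every HZ/5 (200 ms), so three consecutive out-of-range pll_sqsum readings span roughly 600 ms before the reset fires, consistent with the "more than 500ms" comment. The decision logic, isolated:

/* Returns 1 when the caller should reset the chip; 0x40000 is the
 * sqsum threshold used above. */
static int pll_watchdog_tick(unsigned int pll_sqsum, int *count)
{
	if (pll_sqsum >= 0x40000) {
		if (++(*count) == 3) {
			*count = 0;
			return 1;
		}
	} else {
		*count = 0;
	}
	return 0;
}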
static void ath_tx_complete_poll_work(struct work_struct *work)
{
struct ath_softc *sc = container_of(work, struct ath_softc,
@@ -2098,6 +2128,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
struct ath_txq *txq;
int i;
bool needreset = false;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ sc->tx_complete_poll_work_seen++;
+#endif
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i)) {
@@ -2111,6 +2144,33 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
} else {
txq->axq_tx_inprogress = true;
}
+ } else {
+ /* If the queue has pending buffers, then it
+ * should be doing tx work (and have axq_depth).
+ * Shouldn't get to this state, but in
+ * practice we do.
+ */
+ if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) &&
+ (txq->pending_frames > 0 ||
+ !list_empty(&txq->axq_acq) ||
+ txq->stopped)) {
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "txq: %p axq_qnum: %u,"
+ " mac80211_qnum: %i"
+ " axq_link: %p"
+ " pending frames: %i"
+ " axq_acq empty: %i"
+ " stopped: %i"
+ " axq_depth: 0 Attempting to"
+ " restart tx logic.\n",
+ txq, txq->axq_qnum,
+ txq->mac80211_qnum,
+ txq->axq_link,
+ txq->pending_frames,
+ list_empty(&txq->axq_acq),
+ txq->stopped);
+ ath_txq_schedule(sc, txq);
+ }
}
spin_unlock_bh(&txq->axq_lock);
}
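The new else-branch above handles a queue with no hardware frames (axq_depth == 0) but a software backlog, which should not happen yet evidently does; rather than escalate to a reset, it just kicks the scheduler. The decision, reduced to its inputs:

/* Returns 1 when ath_txq_schedule() should be kicked to restart
 * tx on an idle-but-backlogged queue. */
static int txq_needs_kick(int axq_depth, int pending_frames,
			  int acq_empty, int stopped, int offchannel)
{
	if (axq_depth)
		return 0;	/* hardware still busy; watchdog handles it */
	return !offchannel &&
	       (pending_frames > 0 || !acq_empty || stopped);
}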
@@ -2150,7 +2210,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
struct list_head bf_head;
int status;
int txok;
- int qnum;
for (;;) {
status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2193,11 +2252,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (!bf_isampdu(bf)) {
if (txs.ts_status & ATH9K_TXERR_XRETRY)
bf->bf_state.bf_type |= BUF_XRETRY;
- ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
+ ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
}
- qnum = skb_get_queue_mapping(bf->bf_mpdu);
-
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
txok, true);
@@ -2205,10 +2262,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_complete_buf(sc, bf, txq, &bf_head,
&txs, txok, 0);
- if (txq == sc->tx.txq_map[qnum])
- ath_wake_mac80211_queue(sc, qnum);
-
spin_lock_bh(&txq->axq_lock);
+
if (!list_empty(&txq->txq_fifo_pending)) {
INIT_LIST_HEAD(&bf_head);
bf = list_first_entry(&txq->txq_fifo_pending,
@@ -2285,6 +2340,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
}
INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+ INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
error = ath_tx_edma_init(sc);
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index d07ff7f..420d437 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -283,6 +283,7 @@ struct ar9170 {
unsigned int mem_blocks;
unsigned int mem_block_size;
unsigned int rx_size;
+ unsigned int tx_seq_table;
} fw;
/* reset / stuck frames/queue detection */
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 546b4e4..9517ede 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -150,6 +150,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
const struct carl9170fw_otus_desc *otus_desc;
const struct carl9170fw_chk_desc *chk_desc;
const struct carl9170fw_last_desc *last_desc;
+ const struct carl9170fw_txsq_desc *txsq_desc;
last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -264,6 +265,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
FIF_PROMISC_IN_BSS;
}
+ if (SUPP(CARL9170FW_WOL))
+ device_set_wakeup_enable(&ar->udev->dev, true);
+
ar->fw.vif_num = otus_desc->vif_num;
ar->fw.cmd_bufs = otus_desc->cmd_bufs;
ar->fw.address = le32_to_cpu(otus_desc->fw_address);
@@ -296,6 +300,17 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
}
}
+ txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
+ sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
+
+ if (txsq_desc) {
+ ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+ if (!valid_cpu_addr(ar->fw.tx_seq_table))
+ return -EINVAL;
+ } else {
+ ar->fw.tx_seq_table = 0;
+ }
+
#undef SUPPORTED
return 0;
}
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index 3680dfc7..30449d2 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -167,6 +167,7 @@ struct carl9170_rx_filter_cmd {
#define CARL9170_RX_FILTER_CTL_BACKR 0x20
#define CARL9170_RX_FILTER_MGMT 0x40
#define CARL9170_RX_FILTER_DATA 0x80
+#define CARL9170_RX_FILTER_EVERYTHING (~0)
struct carl9170_bcn_ctrl_cmd {
__le32 vif_id;
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 71f3821..9210668 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -69,6 +69,9 @@ enum carl9170fw_feature_list {
/* Firmware RX filter | CARL9170_CMD_RX_FILTER */
CARL9170FW_RX_FILTER,
+ /* Wake up on WLAN */
+ CARL9170FW_WOL,
+
/* KEEP LAST */
__CARL9170FW_FEATURE_NUM
};
@@ -78,6 +81,7 @@ enum carl9170fw_feature_list {
#define FIX_MAGIC "FIX\0"
#define DBG_MAGIC "DBG\0"
#define CHK_MAGIC "CHK\0"
+#define TXSQ_MAGIC "TXSQ"
#define LAST_MAGIC "LAST"
#define CARL9170FW_SET_DAY(d) (((d) - 1) % 31)
@@ -88,8 +92,10 @@ enum carl9170fw_feature_list {
#define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1)
#define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10)
+#define CARL9170FW_MAGIC_SIZE 4
+
struct carl9170fw_desc_head {
- u8 magic[4];
+ u8 magic[CARL9170FW_MAGIC_SIZE];
__le16 length;
u8 min_ver;
u8 cur_ver;
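The version-date macros pack a build date into a single integer as (year - 10) * 372 + (month - 1) * 31 + (day - 1), i.e. 12 months of 31 slots per year. A self-checking example in plain C; the SET_MONTH/SET_YEAR/GET_DAY forms are inferred from the three macros shown in this hunk:

#include <assert.h>

#define SET_DAY(d)	(((d) - 1) % 31)
#define SET_MONTH(m)	((((m) - 1) % 12) * 31)	/* inferred */
#define SET_YEAR(y)	(((y) - 10) * 372)	/* inferred */
#define GET_DAY(v)	(((v) % 31) + 1)	/* inferred */
#define GET_MONTH(v)	((((v) / 31) % 12) + 1)
#define GET_YEAR(v)	((v) / 372 + 10)

int main(void)
{
	/* 2011-01-22 (the date in version.h below):
	 * (11-10)*372 + (1-1)*31 + (22-1) = 393 */
	int v = SET_YEAR(11) + SET_MONTH(1) + SET_DAY(22);

	assert(GET_YEAR(v) == 11);
	assert(GET_MONTH(v) == 1);
	assert(GET_DAY(v) == 22);
	return 0;
}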
@@ -170,6 +176,16 @@ struct carl9170fw_chk_desc {
#define CARL9170FW_CHK_DESC_SIZE \
(sizeof(struct carl9170fw_chk_desc))
+#define CARL9170FW_TXSQ_DESC_MIN_VER 1
+#define CARL9170FW_TXSQ_DESC_CUR_VER 1
+struct carl9170fw_txsq_desc {
+ struct carl9170fw_desc_head head;
+
+ __le32 seq_table_addr;
+} __packed;
+#define CARL9170FW_TXSQ_DESC_SIZE \
+ (sizeof(struct carl9170fw_txsq_desc))
+
#define CARL9170FW_LAST_DESC_MIN_VER 1
#define CARL9170FW_LAST_DESC_CUR_VER 2
struct carl9170fw_last_desc {
@@ -189,8 +205,8 @@ struct carl9170fw_last_desc {
}
static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
- u8 magic[4], __le16 length,
- u8 min_ver, u8 cur_ver)
+ u8 magic[CARL9170FW_MAGIC_SIZE],
+ __le16 length, u8 min_ver, u8 cur_ver)
{
head->magic[0] = magic[0];
head->magic[1] = magic[1];
@@ -204,7 +220,7 @@ static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head,
#define carl9170fw_for_each_hdr(desc, fw_desc) \
for (desc = fw_desc; \
- memcmp(desc->magic, LAST_MAGIC, 4) && \
+ memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) && \
le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \
le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \
desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length)))
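carl9170fw_for_each_hdr walks the firmware's descriptor chain, stopping at the LAST marker or at an implausible length. A kernel-style sketch of a lookup over that chain (simplified; the driver's real carl9170_fw_find_desc() also checks the terminating descriptor after the loop):

static const struct carl9170fw_desc_head *
find_desc(const struct carl9170fw_desc_head *fw_desc,
	  const u8 magic[CARL9170FW_MAGIC_SIZE])
{
	const struct carl9170fw_desc_head *desc;

	carl9170fw_for_each_hdr(desc, fw_desc) {
		if (!memcmp(desc->magic, magic, CARL9170FW_MAGIC_SIZE))
			return desc;
	}
	return NULL;
}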
@@ -218,8 +234,8 @@ static inline bool carl9170fw_supports(__le32 list, u8 feature)
}
static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head,
- const u8 descid[4], u16 min_len,
- u8 compatible_revision)
+ const u8 descid[CARL9170FW_MAGIC_SIZE],
+ u16 min_len, u8 compatible_revision)
{
if (descid[0] == head->magic[0] && descid[1] == head->magic[1] &&
descid[2] == head->magic[2] && descid[3] == head->magic[3] &&
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index e85df6e..4e30762 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -463,6 +463,8 @@
#define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010)
#define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014)
+#define AR9170_PWR_PLL_ADDAC_DIV_S 2
+#define AR9170_PWR_PLL_ADDAC_DIV 0xffc
#define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020)
/* Faraday USB Controller */
@@ -471,6 +473,9 @@
#define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000)
#define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0)
#define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2)
+#define AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND BIT(3)
+#define AR9170_USB_MAIN_CTRL_RESET BIT(4)
+#define AR9170_USB_MAIN_CTRL_CHIP_ENABLE BIT(5)
#define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6)
#define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001)
@@ -499,6 +504,13 @@
#define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020)
#define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021)
+#define AR9170_USB_INTR_SRC0_SETUP BIT(0)
+#define AR9170_USB_INTR_SRC0_IN BIT(1)
+#define AR9170_USB_INTR_SRC0_OUT BIT(2)
+#define AR9170_USB_INTR_SRC0_FAIL BIT(3) /* ??? */
+#define AR9170_USB_INTR_SRC0_END BIT(4) /* ??? */
+#define AR9170_USB_INTR_SRC0_ABORT BIT(7)
+
#define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022)
#define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023)
#define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024)
@@ -506,6 +518,15 @@
#define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026)
#define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027)
#define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028)
+#define AR9170_USB_INTR_SRC7_USB_RESET BIT(1)
+#define AR9170_USB_INTR_SRC7_USB_SUSPEND BIT(2)
+#define AR9170_USB_INTR_SRC7_USB_RESUME BIT(3)
+#define AR9170_USB_INTR_SRC7_ISO_SEQ_ERR BIT(4)
+#define AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT BIT(5)
+#define AR9170_USB_INTR_SRC7_TX0BYTE BIT(6)
+#define AR9170_USB_INTR_SRC7_RX0BYTE BIT(7)
+
+#define AR9170_USB_REG_IDLE_COUNT (AR9170_USB_REG_BASE + 0x02f)
#define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030)
#define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030)
@@ -581,6 +602,10 @@
#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
+
+#define AR9170_USB_REG_WAKE_UP (AR9170_USB_REG_BASE + 0x120)
+#define AR9170_USB_WAKE_UP_WAKE BIT(0)
+
#define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0)
#define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1))
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 870df8c..ede3d7e 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -662,6 +662,13 @@ init:
goto unlock;
}
+ if (ar->fw.tx_seq_table) {
+ err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
+ 0);
+ if (err)
+ goto unlock;
+ }
+
unlock:
if (err && (vif_id >= 0)) {
vif_priv->active = false;
@@ -1279,7 +1286,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta,
- u16 tid, u16 *ssn)
+ u16 tid, u16 *ssn, u8 buf_size)
{
struct ar9170 *ar = hw->priv;
struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 6cc58e0..6f41e21 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -862,6 +862,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
+ if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
+ txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;
+
if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index ee0f84f..15095c0 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
-#define CARL9170FW_VERSION_YEAR 10
-#define CARL9170FW_VERSION_MONTH 10
-#define CARL9170FW_VERSION_DAY 29
-#define CARL9170FW_VERSION_GIT "1.9.0"
+#define CARL9170FW_VERSION_YEAR 11
+#define CARL9170FW_VERSION_MONTH 1
+#define CARL9170FW_VERSION_DAY 22
+#define CARL9170FW_VERSION_GIT "1.9.2"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h
index 24d63b5..9e1324b 100644
--- a/drivers/net/wireless/ath/carl9170/wlan.h
+++ b/drivers/net/wireless/ath/carl9170/wlan.h
@@ -251,7 +251,7 @@ struct carl9170_tx_superdesc {
u8 ampdu_commit_factor:1;
u8 ampdu_unused_bit:1;
u8 queue:2;
- u8 reserved:1;
+ u8 assign_seq:1;
u8 vif_id:3;
u8 fill_in_tsf:1;
u8 cab:1;
@@ -299,6 +299,7 @@ struct _ar9170_tx_hwdesc {
#define CARL9170_TX_SUPER_MISC_QUEUE 0x3
#define CARL9170_TX_SUPER_MISC_QUEUE_S 0
+#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ 0x4
#define CARL9170_TX_SUPER_MISC_VIF_ID 0x38
#define CARL9170_TX_SUPER_MISC_VIF_ID_S 3
#define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40
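For reference, the misc byte layout these masks describe: queue in bits 0-1, the new assign_seq flag in bit 2, vif_id in bits 3-5, fill_in_tsf in bit 6, and cab presumably in the remaining bit 7. Packing it by hand:

static unsigned char pack_super_misc(unsigned int queue, int assign_seq,
				     unsigned int vif_id, int fill_in_tsf,
				     int cab)
{
	return (queue & 0x3) |
	       (assign_seq ? 0x04 : 0) |
	       ((vif_id << 3) & 0x38) |
	       (fill_in_tsf ? 0x40 : 0) |
	       (cab ? 0x80 : 0);	/* bit position assumed */
}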
@@ -413,6 +414,23 @@ enum ar9170_txq {
__AR9170_NUM_TXQ,
};
+/*
+ * This is a workaround for several undocumented bugs.
+ * Don't mess with the QoS/AC <-> HW Queue map if you don't
+ * know what you are doing.
+ *
+ * Known problems [hardware]:
+ * * The MAC does not aggregate frames on anything other
+ * than the first HW queue.
+ * * when an AMPDU is placed [in the first hw queue] and
+ * additional frames are already queued on a different
+ * hw queue, the MAC will ALWAYS freeze.
+ *
+ * In a nutshell: The hardware can either do QoS or
+ * Aggregation but not both at the same time. As a
+ * result, the device is pretty much useless
+ * for any serious 802.11n setup.
+ */
static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 };
#define AR9170_TXQ_DEPTH 32
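Given ar9170_qmap = { 2, 1, 0, 3 }, mac80211's BE queue (index 2), where A-MPDUs are normally built, lands on HW queue 0, the only queue the MAC aggregates on without freezing. The lookup, spelled out:

/* mac80211 queue index (0=VO, 1=VI, 2=BE, 3=BK) -> HW queue */
static unsigned int skb_queue_to_hw_queue(unsigned int mac80211_q)
{
	static const unsigned char qmap[4] = { 2, 1, 0, 3 };

	return qmap[mac80211_q & 3];	/* BE (2) -> HW queue 0 */
}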
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 5d465e5..37b8e11 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -58,8 +58,11 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
- if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)
+ if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) {
REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
+ REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
+ AR_KEYTABLE_TYPE_CLR);
+ }
}
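The key.c fix: on hardware with combined MIC keys, resetting an entry must also return the paired MIC entry's type field to AR_KEYTABLE_TYPE_CLR, not just zero its key words, presumably so the key cache stops treating the MIC slot as programmed. A reduced model:

#include <string.h>

#define TYPE_CLR 0	/* stand-in for AR_KEYTABLE_TYPE_CLR */

struct keycache_entry {
	unsigned int key[5];
	unsigned int type;
};

static void key_reset(struct keycache_entry *e, struct keycache_entry *mic,
		      int mic_combined)
{
	memset(e->key, 0, sizeof(e->key));
	e->type = TYPE_CLR;

	if (mic) {
		memset(mic->key, 0, sizeof(mic->key));
		if (mic_combined)
			mic->type = TYPE_CLR;	/* the line this hunk adds */
	}
}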
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 2b14775..f828f29 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -158,6 +158,13 @@ ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg)
}
}
+bool ath_is_49ghz_allowed(u16 regdomain)
+{
+ /* possibly more */
+ return regdomain == MKK9_MKKC;
+}
+EXPORT_SYMBOL(ath_is_49ghz_allowed);
+
/* Frequency is one where radar detection is required */
static bool ath_is_radar_freq(u16 center_freq)
{
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 345dd97..172f63f6 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -250,6 +250,7 @@ enum CountryCode {
};
bool ath_is_world_regd(struct ath_regulatory *reg);
+bool ath_is_49ghz_allowed(u16 regdomain);
int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
int (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request));
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 61915f3..da60fae 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1397,7 +1397,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
}
/*
- * Send the CARD_DISABLE_PHY_OFF comamnd to the card to disable it
+ * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
*
* After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
*
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ed42457..e1e3b1c 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -2,6 +2,10 @@ config IWLWIFI
tristate "Intel Wireless Wifi"
depends on PCI && MAC80211
select FW_LOADER
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select MAC80211_LEDS
menu "Debugging Options"
depends on IWLWIFI
@@ -106,9 +110,27 @@ config IWL5000
Intel WiFi Link 1000BGN
Intel Wireless WiFi 5150AGN
Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
- Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B)
- Intel WIreless WiFi Link 6050BGN Gen 2 Adapter
+ Intel 6005 Series Wi-Fi Adapters
+ Intel 6030 Series Wi-Fi Adapters
+ Intel Wireless WiFi Link 6150BGN 2 Adapter
Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
+ Intel 2000 Series Wi-Fi Adapters
+
+config IWL_P2P
+ bool "iwlwifi experimental P2P support"
+ depends on IWL5000
+ help
+ This option enables experimental P2P support for some devices
+ based on microcode support. Since P2P support is still under
+ development, this option may enable it now for devices that
+ later turn out not to support it due to microcode
+ restrictions.
+
+ To determine if your microcode supports the experimental P2P
+ offered by this option, check if the driver advertises AP
+ support when it is loaded.
+
+ Say Y only if you want to experiment with P2P.
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 93380f9..25be742 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -26,6 +26,7 @@ iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
+iwlagn-$(CONFIG_IWL5000) += iwl-2000.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
new file mode 100644
index 0000000..3c5dd36
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -0,0 +1,560 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-sta.h"
+#include "iwl-agn.h"
+#include "iwl-helpers.h"
+#include "iwl-agn-hw.h"
+#include "iwl-6000-hw.h"
+#include "iwl-agn-led.h"
+#include "iwl-agn-debugfs.h"
+
+/* Highest firmware API version supported */
+#define IWL2030_UCODE_API_MAX 5
+#define IWL2000_UCODE_API_MAX 5
+#define IWL200_UCODE_API_MAX 5
+
+/* Lowest firmware API version supported */
+#define IWL2030_UCODE_API_MIN 5
+#define IWL2000_UCODE_API_MIN 5
+#define IWL200_UCODE_API_MIN 5
+
+#define IWL2030_FW_PRE "iwlwifi-2030-"
+#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
+#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api)
+
+#define IWL2000_FW_PRE "iwlwifi-2000-"
+#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
+#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api)
+
+#define IWL200_FW_PRE "iwlwifi-200-"
+#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode"
+#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api)
+
+static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
+{
+ /* want Celsius */
+ priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
+ priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 2000 series */
+static void iwl2000_nic_config(struct iwl_priv *priv)
+{
+ u16 radio_cfg;
+
+ radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+ if (priv->cfg->iq_invert)
+ iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
+
+}
+
+static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
+ .min_nrg_cck = 97,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+ .auto_corr_min_ofdm = 80,
+ .auto_corr_min_ofdm_mrc = 128,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 192,
+
+ .auto_corr_max_ofdm = 145,
+ .auto_corr_max_ofdm_mrc = 232,
+ .auto_corr_max_ofdm_x1 = 110,
+ .auto_corr_max_ofdm_mrc_x1 = 232,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 175,
+ .auto_corr_min_cck_mrc = 160,
+ .auto_corr_max_cck_mrc = 310,
+ .nrg_th_cck = 97,
+ .nrg_th_ofdm = 100,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
+{
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
+ priv->cfg->base_params->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->base_params->num_of_queues *
+ sizeof(struct iwlagn_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
+ priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ if (priv->cfg->rx_with_siso_diversity)
+ priv->hw_params.rx_chains_num = 1;
+ else
+ priv->hw_params.rx_chains_num =
+ num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ iwl2000_set_ct_threshold(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl2000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+ if (priv->cfg->need_dc_calib)
+ priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
+ if (priv->cfg->need_temp_offset_calib)
+ priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
+
+ priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
+static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ /*
+ * MULTI-FIXME
+ * See iwl_mac_channel_switch.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl6000_channel_switch_cmd cmd;
+ const struct iwl_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+ struct ieee80211_vif *vif = ctx->vif;
+ struct iwl_host_cmd hcmd = {
+ .id = REPLY_CHANNEL_SWITCH,
+ .len = sizeof(cmd),
+ .flags = CMD_SYNC,
+ .data = &cmd,
+ };
+
+ cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+ ch = ch_switch->channel->hw_value;
+ IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ cmd.channel = cpu_to_le16(ch);
+ cmd.rxon_flags = ctx->staging.flags;
+ cmd.rxon_filter_flags = ctx->staging.filter_flags;
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the ucode channel switch time
+ * adding TSF as one of the factor for when to switch
+ */
+ if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+ if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+ beacon_interval)) {
+ switch_count -= (priv->ucode_beacon_time -
+ tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time = iwl_usecs_to_beacons(priv,
+ switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time = iwl_add_beacon_time(priv,
+ priv->ucode_beacon_time,
+ ucode_switch_time,
+ beacon_interval);
+ }
+ IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+ cmd.switch_time);
+ ch_info = iwl_get_channel_info(priv, priv->band, ch);
+ if (ch_info)
+ cmd.expect_beacon = is_channel_radar(ch_info);
+ else {
+ IWL_ERR(priv, "invalid channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ return -EFAULT;
+ }
+ priv->switch_rxon.channel = cmd.channel;
+ priv->switch_rxon.switch_in_progress = true;
+
+ return iwl_send_cmd_sync(priv, &hcmd);
+}
+
+static struct iwl_lib_ops iwl2000_lib = {
+ .set_hw_params = iwl2000_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwlagn_txq_set_sched,
+ .txq_agg_enable = iwlagn_txq_agg_enable,
+ .txq_agg_disable = iwlagn_txq_agg_disable,
+ .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl_hw_txq_free_tfd,
+ .txq_init = iwl_hw_tx_queue_init,
+ .rx_handler_setup = iwlagn_rx_handler_setup,
+ .setup_deferred_work = iwlagn_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
+ .load_ucode = iwlagn_load_ucode,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
+ .init_alive_start = iwlagn_init_alive_start,
+ .alive_notify = iwlagn_alive_notify,
+ .send_tx_power = iwlagn_send_tx_power,
+ .update_chain_flags = iwl_update_chain_flags,
+ .set_channel_switch = iwl2030_hw_channel_switch,
+ .apm_ops = {
+ .init = iwl_apm_init,
+ .config = iwl2000_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
+ },
+ .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
+ .release_semaphore = iwlcore_eeprom_release_semaphore,
+ .calib_version = iwlagn_eeprom_calib_version,
+ .query_addr = iwlagn_eeprom_query_addr,
+ .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ },
+ .isr_ops = {
+ .isr = iwl_isr_ict,
+ .free = iwl_free_isr_ict,
+ .alloc = iwl_alloc_isr_ict,
+ .reset = iwl_reset_ict,
+ .disable = iwl_disable_ict,
+ },
+ .temp_ops = {
+ .temperature = iwlagn_temperature,
+ },
+ .debugfs_ops = {
+ .rx_stats_read = iwl_ucode_rx_stats_read,
+ .tx_stats_read = iwl_ucode_tx_stats_read,
+ .general_stats_read = iwl_ucode_general_stats_read,
+ .bt_stats_read = iwl_ucode_bt_stats_read,
+ .reply_tx_error = iwl_reply_tx_error_read,
+ },
+ .check_plcp_health = iwl_good_plcp_health,
+ .check_ack_health = iwl_good_ack_health,
+ .txfifo_flush = iwlagn_txfifo_flush,
+ .dev_txfifo_flush = iwlagn_dev_txfifo_flush,
+ .tt_ops = {
+ .lower_power_detection = iwl_tt_is_low_power_state,
+ .tt_power_mode = iwl_tt_current_power_mode,
+ .ct_kill_check = iwl_check_for_ct_kill,
+ }
+};
+
+static const struct iwl_ops iwl2000_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl2030_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_bt_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl200_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static const struct iwl_ops iwl230_ops = {
+ .lib = &iwl2000_lib,
+ .hcmd = &iwlagn_bt_hcmd,
+ .utils = &iwlagn_hcmd_utils,
+ .led = &iwlagn_led_ops,
+ .ieee80211_ops = &iwlagn_hw_ops,
+};
+
+static struct iwl_base_params iwl2000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+ .shadow_ram_support = true,
+ .led_compensation = 51,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_DEF_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .shadow_reg_enable = true,
+};
+
+
+static struct iwl_base_params iwl2030_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = false,
+ .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .adv_thermal_throttle = true,
+ .support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+ .shadow_reg_enable = true,
+};
+
+static struct iwl_ht_params iwl2000_ht_params = {
+ .ht_greenfield_support = true,
+ .use_rts_for_aggregation = true, /* use rts/cts protection */
+};
+
+static struct iwl_bt_params iwl2030_bt_params = {
+ .bt_statistics = true,
+ /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+ .advanced_bt_coexist = true,
+ .agg_time_limit = BT_AGG_THRESHOLD_DEF,
+ .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+ .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+ .bt_sco_disable = true,
+};
+
+#define IWL_DEVICE_2000 \
+ .fw_name_pre = IWL2000_FW_PRE, \
+ .ucode_api_max = IWL2000_UCODE_API_MAX, \
+ .ucode_api_min = IWL2000_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl2000_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2000_base_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .iq_invert = true \
+
+struct iwl_cfg iwl2000_2bgn_cfg = {
+ .name = "2000 Series 2x2 BGN",
+ IWL_DEVICE_2000,
+ .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2000_2bg_cfg = {
+ .name = "2000 Series 2x2 BG",
+ IWL_DEVICE_2000,
+};
+
+#define IWL_DEVICE_2030 \
+ .fw_name_pre = IWL2030_FW_PRE, \
+ .ucode_api_max = IWL2030_UCODE_API_MAX, \
+ .ucode_api_min = IWL2030_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl2030_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2030_base_params, \
+ .bt_params = &iwl2030_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true, \
+ .iq_invert = true \
+
+struct iwl_cfg iwl2030_2bgn_cfg = {
+ .name = "2000 Series 2x2 BGN/BT",
+ IWL_DEVICE_2030,
+ .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl2030_2bg_cfg = {
+ .name = "2000 Series 2x2 BG/BT",
+ IWL_DEVICE_2030,
+};
+
+#define IWL_DEVICE_6035 \
+ .fw_name_pre = IWL2030_FW_PRE, \
+ .ucode_api_max = IWL2030_UCODE_API_MAX, \
+ .ucode_api_min = IWL2030_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_6035_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6035_TX_POWER_VERSION, \
+ .ops = &iwl2030_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2030_base_params, \
+ .bt_params = &iwl2030_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true \
+
+struct iwl_cfg iwl6035_2agn_cfg = {
+ .name = "2000 Series 2x2 AGN/BT",
+ IWL_DEVICE_6035,
+ .ht_params = &iwl2000_ht_params,
+};
+
+struct iwl_cfg iwl6035_2abg_cfg = {
+ .name = "2000 Series 2x2 ABG/BT",
+ IWL_DEVICE_6035,
+};
+
+struct iwl_cfg iwl6035_2bg_cfg = {
+ .name = "2000 Series 2x2 BG/BT",
+ IWL_DEVICE_6035,
+};
+
+#define IWL_DEVICE_200 \
+ .fw_name_pre = IWL200_FW_PRE, \
+ .ucode_api_max = IWL200_UCODE_API_MAX, \
+ .ucode_api_min = IWL200_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl200_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2000_base_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true, \
+ .rx_with_siso_diversity = true \
+
+struct iwl_cfg iwl200_bg_cfg = {
+ .name = "200 Series 1x1 BG",
+ IWL_DEVICE_200,
+};
+
+struct iwl_cfg iwl200_bgn_cfg = {
+ .name = "200 Series 1x1 BGN",
+ IWL_DEVICE_200,
+ .ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_230 \
+ .fw_name_pre = IWL200_FW_PRE, \
+ .ucode_api_max = IWL200_UCODE_API_MAX, \
+ .ucode_api_min = IWL200_UCODE_API_MIN, \
+ .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
+ .ops = &iwl230_ops, \
+ .mod_params = &iwlagn_mod_params, \
+ .base_params = &iwl2030_base_params, \
+ .bt_params = &iwl2030_bt_params, \
+ .need_dc_calib = true, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true, \
+ .rx_with_siso_diversity = true \
+
+struct iwl_cfg iwl230_bg_cfg = {
+ .name = "200 Series 1x1 BG/BT",
+ IWL_DEVICE_230,
+};
+
+struct iwl_cfg iwl230_bgn_cfg = {
+ .name = "200 Series 1x1 BGN/BT",
+ IWL_DEVICE_230,
+ .ht_params = &iwl2000_ht_params,
+};
+
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index abe2b73..dc7c3a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -59,33 +59,6 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv,
return iwl_send_cmd(priv, &cmd);
}
-/* Set led on command */
-static int iwl3945_led_on(struct iwl_priv *priv)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .on = IWL_LED_SOLID,
- .off = 0,
- .interval = IWL_DEF_LED_INTRVL
- };
- return iwl3945_send_led_cmd(priv, &led_cmd);
-}
-
-/* Set led off command */
-static int iwl3945_led_off(struct iwl_priv *priv)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .on = 0,
- .off = 0,
- .interval = IWL_DEF_LED_INTRVL
- };
- IWL_DEBUG_LED(priv, "led off\n");
- return iwl3945_send_led_cmd(priv, &led_cmd);
-}
-
const struct iwl_led_ops iwl3945_led_ops = {
.cmd = iwl3945_send_led_cmd,
- .on = iwl3945_led_on,
- .off = iwl3945_led_off,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 39b6f16..166e9f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -528,10 +528,11 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
rx_status.flag = 0;
rx_status.mactime = le64_to_cpu(rx_end->timestamp);
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+ rx_status.band);
rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
if (rx_status.band == IEEE80211_BAND_5GHZ)
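The reordering above exists because ieee80211_channel_to_frequency() now needs the band: channel numbers alone are ambiguous between bands, and the two mappings differ. A reduced model of the conversion, runnable as plain C:

static int chan_to_freq(int chan, int is_5ghz)
{
	if (!is_5ghz) {
		if (chan == 14)
			return 2484;		/* the 2.4 GHz oddball */
		return 2407 + chan * 5;		/* e.g. ch 6 -> 2437 MHz */
	}
	return 5000 + chan * 5;			/* e.g. ch 36 -> 5180 MHz */
}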
@@ -695,8 +696,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
/* We need to figure out how to get the sta->supp_rates while
* in this running context */
- rate_mask = IWL_RATES_MASK;
-
+ rate_mask = IWL_RATES_MASK_3945;
/* Set retry limit on DATA packets and Probe Responses*/
if (ieee80211_is_probe_resp(fc))
@@ -1583,7 +1583,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
ref_temp);
/* set tx power value for all rates, OFDM and CCK */
- for (rate_index = 0; rate_index < IWL_RATE_COUNT;
+ for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
rate_index++) {
int power_idx =
ch_info->power_info[rate_index].base_power_index;
@@ -1823,7 +1823,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
- rc = priv->cfg->ops->lib->send_tx_power(priv);
+ rc = iwl_set_tx_power(priv, priv->tx_power_next, true);
if (rc) {
IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
return rc;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 91a9f52..8998ed1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -251,14 +251,6 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
*/
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "initialize" alive if code weren't properly loaded. */
@@ -1571,7 +1563,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
/* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames */
- ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+ ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
if (ret) {
IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
return ret;
@@ -2274,6 +2266,29 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
+static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+ "tsf:0x%.8x%.8x rate:%d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf),
+ le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+ if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+ queue_work(priv->workqueue, &priv->beacon_update);
+}
+
static int iwl4965_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
{
@@ -2316,6 +2331,12 @@ static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
/* Tx response */
priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
+ priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
+
+ /* set up notification wait support */
+ spin_lock_init(&priv->_agn.notif_wait_lock);
+ INIT_LIST_HEAD(&priv->_agn.notif_waits);
+ init_waitqueue_head(&priv->_agn.notif_waitq);
}
static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index ef36aff..f6493f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -67,13 +67,13 @@
#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
-#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
-#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
-#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
+#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
+#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
+#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api)
-#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-"
-#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
-#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
+#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
+#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
+#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api)
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
@@ -90,7 +90,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
-static void iwl6050g2_additional_nic_config(struct iwl_priv *priv)
+static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
@@ -354,7 +354,7 @@ static struct iwl_lib_ops iwl6000_lib = {
}
};
-static struct iwl_lib_ops iwl6000g2b_lib = {
+static struct iwl_lib_ops iwl6030_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
@@ -430,8 +430,8 @@ static struct iwl_nic_ops iwl6050_nic_ops = {
.additional_nic_config = &iwl6050_additional_nic_config,
};
-static struct iwl_nic_ops iwl6050g2_nic_ops = {
- .additional_nic_config = &iwl6050g2_additional_nic_config,
+static struct iwl_nic_ops iwl6150_nic_ops = {
+ .additional_nic_config = &iwl6150_additional_nic_config,
};
static const struct iwl_ops iwl6000_ops = {
@@ -451,17 +451,17 @@ static const struct iwl_ops iwl6050_ops = {
.ieee80211_ops = &iwlagn_hw_ops,
};
-static const struct iwl_ops iwl6050g2_ops = {
+static const struct iwl_ops iwl6150_ops = {
.lib = &iwl6000_lib,
.hcmd = &iwlagn_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
- .nic = &iwl6050g2_nic_ops,
+ .nic = &iwl6150_nic_ops,
.ieee80211_ops = &iwlagn_hw_ops,
};
-static const struct iwl_ops iwl6000g2b_ops = {
- .lib = &iwl6000g2b_lib,
+static const struct iwl_ops iwl6030_ops = {
+ .lib = &iwl6030_lib,
.hcmd = &iwlagn_bt_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
@@ -479,7 +479,6 @@ static struct iwl_base_params iwl6000_base_params = {
.shadow_ram_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -503,7 +502,6 @@ static struct iwl_base_params iwl6050_base_params = {
.shadow_ram_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -526,7 +524,6 @@ static struct iwl_base_params iwl6000_g2_base_params = {
.shadow_ram_support = true,
.led_compensation = 57,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
@@ -555,11 +552,11 @@ static struct iwl_bt_params iwl6000_bt_params = {
};
#define IWL_DEVICE_6005 \
- .fw_name_pre = IWL6000G2A_FW_PRE, \
+ .fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \
+ .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.ops = &iwl6000_ops, \
.mod_params = &iwlagn_mod_params, \
.base_params = &iwl6000_g2_base_params, \
@@ -584,12 +581,12 @@ struct iwl_cfg iwl6005_2bg_cfg = {
};
#define IWL_DEVICE_6030 \
- .fw_name_pre = IWL6000G2B_FW_PRE, \
+ .fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
- .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \
- .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \
- .ops = &iwl6000g2b_ops, \
+ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+ .ops = &iwl6030_ops, \
.mod_params = &iwlagn_mod_params, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
@@ -708,9 +705,9 @@ struct iwl_cfg iwl6150_bgn_cfg = {
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
- .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
- .ops = &iwl6050g2_ops,
+ .eeprom_ver = EEPROM_6150_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
+ .ops = &iwl6150_ops,
.mod_params = &iwlagn_mod_params,
.base_params = &iwl6050_base_params,
.ht_params = &iwl6000_ht_params,
@@ -736,5 +733,5 @@ struct iwl_cfg iwl6000_3agn_cfg = {
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index d16bb5e..9006293 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -631,8 +631,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
}
spin_lock_irqsave(&priv->lock, flags);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
rx_info = &(((struct iwl_bt_notif_statistics *)resp)->
rx.general.common);
ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm);
@@ -897,8 +896,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
}
spin_lock_irqsave(&priv->lock, flags);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)->
rx.general.common);
} else {
@@ -913,8 +911,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
rxon_chnum = le16_to_cpu(ctx->staging.channel);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
stat_band24 = !!(((struct iwl_bt_notif_statistics *)
stat_resp)->flag &
STATISTICS_REPLY_FLG_BAND_24G_MSK);
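The hunks above are the first of many call sites that collapse the open-coded bt_params test into a single predicate; the inline helper itself is added to iwl-core.h later in this patch. A minimal sketch of the pattern (use_bt_layout() is a hypothetical stand-in for the call-site body):

	/* before: every call site re-implements the NULL check */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->bt_statistics)
		use_bt_layout();

	/* after: one inline helper centralises the check */
	if (iwl_bt_statistics(priv))
		use_bt_layout();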
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index a6dbd89..b500aaa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -39,8 +39,7 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
int p = 0;
u32 flag;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics)
+ if (iwl_bt_statistics(priv))
flag = le32_to_cpu(priv->_agn.statistics_bt.flag);
else
flag = le32_to_cpu(priv->_agn.statistics.flag);
@@ -89,8 +88,7 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
ofdm = &priv->_agn.statistics_bt.rx.ofdm;
cck = &priv->_agn.statistics_bt.rx.cck;
general = &priv->_agn.statistics_bt.rx.general.common;
@@ -536,8 +534,7 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
tx = &priv->_agn.statistics_bt.tx;
accum_tx = &priv->_agn.accum_statistics_bt.tx;
delta_tx = &priv->_agn.delta_statistics_bt.tx;
@@ -737,8 +734,7 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
* the last statistics notification from uCode
* might not reflect the current uCode activity
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
general = &priv->_agn.statistics_bt.general.common;
dbg = &priv->_agn.statistics_bt.general.common.dbg;
div = &priv->_agn.statistics_bt.general.common.div;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 366340f..41543ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -305,7 +305,11 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].type = 0; /* BSS */
cmd.slots[1].type = 1; /* PAN */
- if (ctx_bss->vif && ctx_pan->vif) {
+ if (priv->_agn.hw_roc_channel) {
+ /* both contexts must be used for this to happen */
+ slot1 = priv->_agn.hw_roc_duration;
+ slot0 = IWL_MIN_SLOT_TIME;
+ } else if (ctx_bss->vif && ctx_pan->vif) {
int bcnint = ctx_pan->vif->bss_conf.beacon_int;
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
@@ -330,12 +334,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
(!ctx_bss->vif->bss_conf.idle &&
!ctx_bss->vif->bss_conf.assoc)) {
- slot0 = dtim * bcnint * 3 - 20;
- slot1 = 20;
+ slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
} else if (!ctx_pan->vif->bss_conf.idle &&
!ctx_pan->vif->bss_conf.assoc) {
- slot1 = bcnint * 3 - 20;
- slot0 = 20;
+ slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
+ slot0 = IWL_MIN_SLOT_TIME;
}
} else if (ctx_pan->vif) {
slot0 = 0;
@@ -344,8 +348,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- slot0 = slot1 * 3 - 20;
- slot1 = 20;
+ slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
+ slot1 = IWL_MIN_SLOT_TIME;
}
}
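The slot arithmetic above always pairs one minimum-length slot with the remainder of three beacon intervals, so replacing the bare 20 with IWL_MIN_SLOT_TIME (defined as 20 TU in iwl-commands.h later in this patch) changes no values. A standalone sketch of the computation, with hypothetical beacon parameters:

	#include <stdio.h>

	#define IWL_MIN_SLOT_TIME 20		/* TU, as added by this patch */

	int main(void)
	{
		int bcnint = 100;	/* hypothetical beacon interval, in TU */
		int dtim = 2;		/* hypothetical DTIM period */

		/* scanning, or BSS not yet associated: starve the PAN slot */
		int slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;	/* 580 TU */
		int slot1 = IWL_MIN_SLOT_TIME;				/* 20 TU */

		printf("slot0=%d slot1=%d\n", slot0, slot1);
		return 0;
	}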
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 1a24946..c1190d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -63,23 +63,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
}
/* Set led register on */
-static int iwl_led_on_reg(struct iwl_priv *priv)
+void iwlagn_led_enable(struct iwl_priv *priv)
{
- IWL_DEBUG_LED(priv, "led on\n");
iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
- return 0;
-}
-
-/* Set led register off */
-static int iwl_led_off_reg(struct iwl_priv *priv)
-{
- IWL_DEBUG_LED(priv, "LED Reg off\n");
- iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
- return 0;
}
const struct iwl_led_ops iwlagn_led_ops = {
.cmd = iwl_send_led_cmd,
- .on = iwl_led_on_reg,
- .off = iwl_led_off_reg,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index a594e4fd..96f323d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -28,5 +28,6 @@
#define __iwl_agn_led_h__
extern const struct iwl_led_ops iwlagn_led_ops;
+void iwlagn_led_enable(struct iwl_priv *priv);
#endif /* __iwl_agn_led_h__ */
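The per-ops on/off hooks disappear because blinking moves to a LED class device driven by mac80211's throughput trigger (the led_classdev field is added to struct iwl_priv and the iwl_blink table appears at the end of this patch). A hedged sketch of the usual registration pattern; the exact call sites in the rewritten iwl-led.c may differ, and the brightness_set callback is omitted here:

	static int iwl_leds_register_sketch(struct iwl_priv *priv)
	{
		priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
					   wiphy_name(priv->hw->wiphy));
		/* let mac80211 derive the blink rate from throughput */
		priv->led.default_trigger =
			ieee80211_create_tpt_led_trigger(priv->hw,
					IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
					iwl_blink, ARRAY_SIZE(iwl_blink));

		return led_classdev_register(&priv->pci_dev->dev, &priv->led);
	}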
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 3dee87e..3aa4864 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -473,6 +473,11 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv)
priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
iwlagn_rx_calib_complete;
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
+
+ /* set up notification wait support */
+ spin_lock_init(&priv->_agn.notif_wait_lock);
+ INIT_LIST_HEAD(&priv->_agn.notif_waits);
+ init_waitqueue_head(&priv->_agn.notif_waitq);
}
void iwlagn_setup_deferred_work(struct iwl_priv *priv)
@@ -1157,10 +1162,11 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+ rx_status.band);
rx_status.rate_idx =
iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
rx_status.flag = 0;
@@ -1389,15 +1395,12 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
- unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- spin_lock_irqsave(&priv->lock, flags);
if (priv->is_internal_short_scan)
interval = 0;
else
interval = vif->bss_conf.beacon_int;
- spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -1857,21 +1860,6 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
- /*
- * When we are doing a restart, need to also reconfigure BT
- * SCO to the device. If not doing a restart, bt_sco_active
- * will always be false, so there's no need to have an extra
- * variable to check for it.
- */
- if (priv->bt_sco_active) {
- struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
-
- if (priv->bt_sco_active)
- sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
- if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
- sizeof(sco_cmd), &sco_cmd))
- IWL_ERR(priv, "failed to send BT SCO command\n");
- }
}
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
@@ -2032,7 +2020,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
unsigned long flags;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
- struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
@@ -2063,15 +2050,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
queue_work(priv->workqueue,
&priv->bt_traffic_change_work);
}
- if (priv->bt_sco_active !=
- (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
- priv->bt_sco_active = uart_msg->frame3 &
- BT_UART_MSG_FRAME3SCOESCO_MSK;
- if (priv->bt_sco_active)
- sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
- iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
- sizeof(sco_cmd), &sco_cmd, NULL);
- }
}
iwlagn_set_kill_msk(priv, uart_msg);
@@ -2389,3 +2367,44 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
}
return 0;
}
+
+/* notification wait support */
+void iwlagn_init_notification_wait(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ void (*fn)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt),
+ u8 cmd)
+{
+ wait_entry->fn = fn;
+ wait_entry->cmd = cmd;
+ wait_entry->triggered = false;
+
+ spin_lock_bh(&priv->_agn.notif_wait_lock);
+ list_add(&wait_entry->list, &priv->_agn.notif_waits);
+ spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
+
+signed long iwlagn_wait_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(priv->_agn.notif_waitq,
+				 wait_entry->triggered,
+ timeout);
+
+ spin_lock_bh(&priv->_agn.notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&priv->_agn.notif_wait_lock);
+
+ return ret;
+}
+
+void iwlagn_remove_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(&priv->_agn.notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&priv->_agn.notif_wait_lock);
+}
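Together these three functions form a one-shot wait primitive: register interest, trigger the uCode, then block until the RX path flags the entry or the timeout fires. A sketch of the intended call pattern, mirroring the PAN-disable user in iwl-agn-rxon.c below (SOME_NOTIFICATION and send_trigger_cmd() are placeholders):

	static int wait_for_ucode_sketch(struct iwl_priv *priv)
	{
		struct iwl_notification_wait wait;	/* lives on the stack */

		/* register interest before triggering, to avoid a race */
		iwlagn_init_notification_wait(priv, &wait, NULL,
					      SOME_NOTIFICATION);

		if (send_trigger_cmd(priv)) {
			/* on failure we must unlink the entry ourselves */
			iwlagn_remove_notification(priv, &wait);
			return -EIO;
		}

		/* iwlagn_wait_notification() unlinks the entry in all cases */
		if (iwlagn_wait_notification(priv, &wait, HZ) == 0)
			return -ETIMEDOUT;
		return 0;
	}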
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 75fcd30..d03b473 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -179,31 +179,31 @@ static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
};
static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
- {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
+ {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};
static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
- {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+ {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
+ {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};
static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
- {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
- {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
+ {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
+ {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};
static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
- {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+ {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
+ {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
@@ -2890,6 +2890,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
u8 ant_toggle_cnt = 0;
u8 use_ht_possible = 1;
u8 valid_tx_ant = 0;
+ struct iwl_station_priv *sta_priv =
+ container_of(lq_sta, struct iwl_station_priv, lq_sta);
struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
/* Override starting rate (index 0) if needed for debug purposes */
@@ -3008,7 +3010,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
repeat_rate--;
}
- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_params.agg_frame_cnt_limit =
+ sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
lq_cmd->agg_params.agg_time_limit =
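The GNU ?: form above falls back to the default only when no aggregation session has recorded a limit; the per-station clamp itself happens in iwlagn_mac_ampdu_action() in iwl-agn.c below. A toy version of the combined logic (the default's numeric value here is illustrative, not the driver's):

	#define AGG_FRAME_LIMIT_DEF 63		/* illustrative value only */

	static unsigned agg_frame_limit_sketch(unsigned max_agg_bufsize,
					       unsigned peer_buf_size)
	{
		/* first session: start from the default */
		if (max_agg_bufsize == 0)
			max_agg_bufsize = AGG_FRAME_LIMIT_DEF;

		/* uCode has one global limit per station: take the minimum */
		if (peer_buf_size < max_agg_bufsize)
			max_agg_bufsize = peer_buf_size;

		/* the ?: in rs_fill_link_cmd() guards the unset case */
		return max_agg_bufsize ?: AGG_FRAME_LIMIT_DEF;
	}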
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 75e50d3..184828c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -213,6 +213,7 @@ enum {
IWL_CCK_BASIC_RATES_MASK)
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
#define IWL_INVALID_VALUE -1
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index bbd40b7..b192ca8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -73,8 +73,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
int bcn_silence_a, bcn_silence_b, bcn_silence_c;
int last_rx_noise;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics)
+ if (iwl_bt_statistics(priv))
rx_info = &(priv->_agn.statistics_bt.rx.general.common);
else
rx_info = &(priv->_agn.statistics.rx.general);
@@ -125,8 +124,7 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
struct statistics_general_common *general, *accum_general;
struct statistics_tx *tx, *accum_tx;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
prev_stats = (__le32 *)&priv->_agn.statistics_bt;
accum_stats = (u32 *)&priv->_agn.accum_statistics_bt;
size = sizeof(struct iwl_bt_notif_statistics);
@@ -207,8 +205,7 @@ bool iwl_good_plcp_health(struct iwl_priv *priv,
struct statistics_rx_phy *ofdm;
struct statistics_rx_ht_phy *ofdm_ht;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
ofdm = &pkt->u.stats_bt.rx.ofdm;
ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht;
combined_plcp_delta =
@@ -265,8 +262,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
IWL_DEBUG_RX(priv,
"Statistics notification received (%d vs %d).\n",
(int)sizeof(struct iwl_bt_notif_statistics),
@@ -304,8 +300,7 @@ void iwl_rx_statistics(struct iwl_priv *priv,
iwl_recover_from_statistics(priv, pkt);
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics)
+ if (iwl_bt_statistics(priv))
memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt,
sizeof(priv->_agn.statistics_bt));
else
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 6d140bd..6c2adc5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -52,10 +52,14 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct iwl_rxon_cmd *send)
{
+ struct iwl_notification_wait disable_wait;
__le32 old_filter = send->filter_flags;
u8 old_dev_type = send->dev_type;
int ret;
+ iwlagn_init_notification_wait(priv, &disable_wait, NULL,
+ REPLY_WIPAN_DEACTIVATION_COMPLETE);
+
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
send->dev_type = RXON_DEV_TYPE_P2P;
ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
@@ -63,11 +67,18 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
send->filter_flags = old_filter;
send->dev_type = old_dev_type;
- if (ret)
+ if (ret) {
IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
-
- /* FIXME: WAIT FOR PAN DISABLE */
- msleep(300);
+ iwlagn_remove_notification(priv, &disable_wait);
+ } else {
+ signed long wait_res;
+
+ wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ);
+ if (wait_res == 0) {
+ IWL_ERR(priv, "Timed out waiting for PAN disable\n");
+ ret = -EIO;
+ }
+ }
return ret;
}
@@ -145,6 +156,23 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
/* always get timestamp with Rx frame */
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+ if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
+ struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
+
+ iwl_set_rxon_channel(priv, chan, ctx);
+ iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+ ctx->staging.filter_flags |=
+ RXON_FILTER_ASSOC_MSK |
+ RXON_FILTER_PROMISC_MSK |
+ RXON_FILTER_CTL2HOST_MSK;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+ new_assoc = true;
+
+ if (memcmp(&ctx->staging, &ctx->active,
+ sizeof(ctx->staging)) == 0)
+ return 0;
+ }
+
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -288,10 +316,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* If we issue a new RXON command which required a tune then we must
* send a new TXPOWER command or we won't be able to Tx any frames.
*
- * FIXME: which RXON requires a tune? Can we optimise this out in
- * some cases?
+ * It's expected that we set the power here if the channel is changing.
*/
- ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
+ ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
if (ret) {
IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
return ret;
@@ -546,12 +573,10 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
- iwl_led_associate(priv);
priv->timestamp = bss_conf->timestamp;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_led_disassociate(priv);
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 24a11b8..266490d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -539,7 +539,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
unsigned long flags;
bool is_agg = false;
- if (info->control.vif)
+ /*
+ * If the frame needs to go out off-channel, then
+ * we'll have put the PAN context to that channel,
+ * so make the frame go out there.
+ */
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+ else if (info->control.vif)
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
spin_lock_irqsave(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 24dabcd..d807e5e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -308,14 +308,6 @@ void iwlagn_init_alive_start(struct iwl_priv *priv)
{
int ret = 0;
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
/* initialize uCode was loaded... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "initialize" alive if code weren't properly loaded. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index c1cfd99..8ee810f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -59,6 +59,7 @@
#include "iwl-sta.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
+#include "iwl-agn-led.h"
/******************************************************************************
@@ -461,8 +462,12 @@ static void iwl_rx_reply_alive(struct iwl_priv *priv,
if (palive->is_valid == UCODE_VALID_OK)
queue_delayed_work(priv->workqueue, pwork,
msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
+ else {
+ IWL_WARN(priv, "%s uCode did not respond OK.\n",
+ (palive->ver_subtype == INITIALIZE_SUBTYPE) ?
+ "init" : "runtime");
+ queue_work(priv->workqueue, &priv->restart);
+ }
}
static void iwl_bg_beacon_update(struct work_struct *work)
@@ -699,18 +704,18 @@ static void iwl_bg_ucode_trace(unsigned long data)
}
}
-static void iwl_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon =
- (struct iwl4965_beacon_notif *)pkt->u.raw;
+ struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
+ u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+ "tsf:0x%.8x%.8x rate:%d\n",
+ status & TX_STATUS_MSK,
beacon->beacon_notify_hdr.failure_frame,
le32_to_cpu(beacon->ibss_mgr_status),
le32_to_cpu(beacon->high_tsf),
@@ -813,7 +818,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
+ priv->rx_handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
/*
* The same handler is used for both the REPLY to a discrete
@@ -846,7 +851,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
* the appropriate handlers, including command responses,
* frame-received notifications, and other notifications.
*/
-void iwl_rx_handle(struct iwl_priv *priv)
+static void iwl_rx_handle(struct iwl_priv *priv)
{
struct iwl_rx_mem_buffer *rxb;
struct iwl_rx_packet *pkt;
@@ -910,6 +915,27 @@ void iwl_rx_handle(struct iwl_priv *priv)
(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
(pkt->hdr.cmd != REPLY_TX);
+ /*
+ * Do the notification wait before RX handlers so
+ * even if the RX handler consumes the RXB we have
+ * access to it in the notification wait entry.
+ */
+ if (!list_empty(&priv->_agn.notif_waits)) {
+ struct iwl_notification_wait *w;
+
+ spin_lock(&priv->_agn.notif_wait_lock);
+ list_for_each_entry(w, &priv->_agn.notif_waits, list) {
+ if (w->cmd == pkt->hdr.cmd) {
+ w->triggered = true;
+ if (w->fn)
+ w->fn(priv, pkt);
+ }
+ }
+ spin_unlock(&priv->_agn.notif_wait_lock);
+
+ wake_up_all(&priv->_agn.notif_waitq);
+ }
+
/* Based on type of command response or notification,
* handle those that need handling via function in
* rx_handlers table. See iwl_setup_rx_handlers() */
@@ -2632,13 +2658,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
/* Initialize uCode has loaded Runtime uCode ... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "runtime" alive if code weren't properly loaded. */
@@ -2726,8 +2745,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
/* At this point, the NIC is initialized and operational */
iwl_rf_kill_ct_config(priv);
- iwl_leds_init(priv);
-
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
wake_up_interruptible(&priv->wait_command_queue);
@@ -2769,7 +2786,6 @@ static void __iwl_down(struct iwl_priv *priv)
priv->cfg->bt_params->bt_init_traffic_load;
else
priv->bt_traffic_load = 0;
- priv->bt_sco_active = false;
priv->bt_full_concurrent = false;
priv->bt_ci_compliance = 0;
@@ -3063,8 +3079,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
}
if (priv->start_calib) {
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_statistics) {
+ if (iwl_bt_statistics(priv)) {
iwl_chain_noise_calibration(priv,
(void *)&priv->_agn.statistics_bt);
iwl_sensitivity_calibration(priv,
@@ -3089,7 +3104,7 @@ static void iwl_bg_restart(struct work_struct *data)
if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
struct iwl_rxon_context *ctx;
- bool bt_sco, bt_full_concurrent;
+ bool bt_full_concurrent;
u8 bt_ci_compliance;
u8 bt_load;
u8 bt_status;
@@ -3108,7 +3123,6 @@ static void iwl_bg_restart(struct work_struct *data)
* re-configure the hw when we reconfigure the BT
* command.
*/
- bt_sco = priv->bt_sco_active;
bt_full_concurrent = priv->bt_full_concurrent;
bt_ci_compliance = priv->bt_ci_compliance;
bt_load = priv->bt_traffic_load;
@@ -3116,7 +3130,6 @@ static void iwl_bg_restart(struct work_struct *data)
__iwl_down(priv);
- priv->bt_sco_active = bt_sco;
priv->bt_full_concurrent = bt_full_concurrent;
priv->bt_ci_compliance = bt_ci_compliance;
priv->bt_traffic_load = bt_load;
@@ -3178,6 +3191,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+ hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
if (!priv->cfg->base_params->broken_powersave)
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
@@ -3194,8 +3209,11 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
}
+ hw->wiphy->max_remain_on_channel_duration = 1000;
+
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
/*
* For now, disable PS by default because it affects
@@ -3219,6 +3237,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&priv->bands[IEEE80211_BAND_5GHZ];
+ iwl_leds_init(priv);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -3263,7 +3283,7 @@ int iwlagn_mac_start(struct ieee80211_hw *hw)
}
}
- iwl_led_start(priv);
+ iwlagn_led_enable(priv);
out:
priv->is_open = 1;
@@ -3345,6 +3365,14 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS, the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
@@ -3399,10 +3427,12 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
struct iwl_priv *priv = hw->priv;
int ret = -EINVAL;
+ struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
@@ -3457,11 +3487,28 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
}
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /*
+ * If the limit is 0 it wasn't initialised yet; use
+ * the default. We can do that since we take the
+ * minimum below, and we don't want to go above our
+ * default due to hardware restrictions.
+ */
+ if (sta_priv->max_agg_bufsize == 0)
+ sta_priv->max_agg_bufsize =
+ LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ sta_priv->max_agg_bufsize =
+ min(sta_priv->max_agg_bufsize, buf_size);
+
if (priv->cfg->ht_params &&
priv->cfg->ht_params->use_rts_for_aggregation) {
- struct iwl_station_priv *sta_priv =
- (void *) sta->drv_priv;
-
/*
* switch to RTS/CTS if it is the prefer protection
* method for HT traffic
@@ -3469,9 +3516,13 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
sta_priv->lq_sta.lq.general_params.flags |=
LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
- iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
- &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}
+
+ sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+ sta_priv->max_agg_bufsize;
+
+ iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+ &sta_priv->lq_sta.lq, CMD_ASYNC, false);
ret = 0;
break;
}
@@ -3709,6 +3760,97 @@ done:
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static void iwlagn_disable_roc(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+ struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);
+
+ lockdep_assert_held(&priv->mutex);
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ iwl_set_rxon_channel(priv, chan, ctx);
+ iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
+
+ priv->_agn.hw_roc_channel = NULL;
+
+ iwlcore_commit_rxon(priv, ctx);
+
+ ctx->is_active = false;
+}
+
+static void iwlagn_bg_roc_done(struct work_struct *work)
+{
+ struct iwl_priv *priv = container_of(work, struct iwl_priv,
+ _agn.hw_roc_work.work);
+
+ mutex_lock(&priv->mutex);
+ ieee80211_remain_on_channel_expired(priv->hw);
+ iwlagn_disable_roc(priv);
+ mutex_unlock(&priv->mutex);
+}
+
+#ifdef CONFIG_IWL5000
+static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type,
+ int duration)
+{
+ struct iwl_priv *priv = hw->priv;
+ int err = 0;
+
+ if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
+ BIT(NL80211_IFTYPE_P2P_CLIENT)))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mutex);
+
+ if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
+ test_bit(STATUS_SCAN_HW, &priv->status)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
+ priv->_agn.hw_roc_channel = channel;
+ priv->_agn.hw_roc_chantype = channel_type;
+ priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
+ iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
+ queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
+ msecs_to_jiffies(duration + 20));
+
+ msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
+ ieee80211_ready_on_channel(priv->hw);
+
+ out:
+ mutex_unlock(&priv->mutex);
+
+ return err;
+}
+
+static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ cancel_delayed_work_sync(&priv->_agn.hw_roc_work);
+
+ mutex_lock(&priv->mutex);
+ iwlagn_disable_roc(priv);
+ mutex_unlock(&priv->mutex);
+
+ return 0;
+}
+#endif
+
/*****************************************************************************
*
* driver setup and teardown
@@ -3730,6 +3872,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
+ INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);
iwl_setup_scan_deferred_work(priv);
@@ -3898,6 +4041,8 @@ struct ieee80211_ops iwlagn_hw_ops = {
.channel_switch = iwlagn_mac_channel_switch,
.flush = iwlagn_mac_flush,
.tx_last_beacon = iwl_mac_tx_last_beacon,
+ .remain_on_channel = iwl_mac_remain_on_channel,
+ .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
};
#endif
@@ -4025,6 +4170,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
+#ifdef CONFIG_IWL_P2P
+ priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+#endif
priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -4272,6 +4421,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
* we need to set STATUS_EXIT_PENDING bit.
*/
set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+ iwl_leds_exit(priv);
+
if (priv->mac80211_registered) {
ieee80211_unregister_hw(priv->hw);
priv->mac80211_registered = 0;
@@ -4492,6 +4644,49 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
{IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
+/* 2x00 Series */
+ {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
+
+/* 2x30 Series */
+ {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
+
+/* 6x35 Series */
+ {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
+ {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
+
+/* 200 Series */
+ {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)},
+
+/* 230 Series */
+ {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)},
+ {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)},
+
#endif /* CONFIG_IWL5000 */
{0}
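Each IWL_PCI_DEVICE() line above expands to an ordinary struct pci_device_id initializer; the macro is defined near the top of iwl-agn.c (outside this hunk) and is essentially the following, so treat this reconstruction as a sketch:

	#define IWL_PCI_DEVICE(dev, subdev, cfg) \
		.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
		.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
		.driver_data = (kernel_ulong_t)&(cfg)

	/* so one new 2x00-series entry is equivalent to: */
	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x0890,
	  .subvendor = PCI_ANY_ID, .subdevice = 0x4022,
	  .driver_data = (kernel_ulong_t)&iwl2000_2bgn_cfg },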
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index da30358..d00e1ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -96,6 +96,17 @@ extern struct iwl_cfg iwl100_bgn_cfg;
extern struct iwl_cfg iwl100_bg_cfg;
extern struct iwl_cfg iwl130_bgn_cfg;
extern struct iwl_cfg iwl130_bg_cfg;
+extern struct iwl_cfg iwl2000_2bgn_cfg;
+extern struct iwl_cfg iwl2000_2bg_cfg;
+extern struct iwl_cfg iwl2030_2bgn_cfg;
+extern struct iwl_cfg iwl2030_2bg_cfg;
+extern struct iwl_cfg iwl6035_2agn_cfg;
+extern struct iwl_cfg iwl6035_2abg_cfg;
+extern struct iwl_cfg iwl6035_2bg_cfg;
+extern struct iwl_cfg iwl200_bg_cfg;
+extern struct iwl_cfg iwl200_bgn_cfg;
+extern struct iwl_cfg iwl230_bg_cfg;
+extern struct iwl_cfg iwl230_bgn_cfg;
extern struct iwl_mod_params iwlagn_mod_params;
extern struct iwl_hcmd_ops iwlagn_hcmd;
@@ -185,7 +196,6 @@ void iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
-void iwl_rx_handle(struct iwl_priv *priv);
/* tx */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
@@ -330,6 +340,21 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
+/* notification wait support */
+void __acquires(wait_entry)
+iwlagn_init_notification_wait(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ void (*fn)(struct iwl_priv *priv,
+ struct iwl_rx_packet *pkt),
+ u8 cmd);
+signed long __releases(wait_entry)
+iwlagn_wait_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
+void __releases(wait_entry)
+iwlagn_remove_notification(struct iwl_priv *priv,
+ struct iwl_notification_wait *wait_entry);
+
/* mac80211 handlers (for 4965) */
int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwlagn_mac_start(struct ieee80211_hw *hw);
@@ -349,7 +374,8 @@ void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index f893d4a..0a1d4ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -178,7 +178,6 @@ enum {
REPLY_BT_COEX_PRIO_TABLE = 0xcc,
REPLY_BT_COEX_PROT_ENV = 0xcd,
REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
- REPLY_BT_COEX_SCO = 0xcf,
/* PAN commands */
REPLY_WIPAN_PARAMS = 0xb2,
@@ -189,6 +188,7 @@ enum {
REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
+ REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
REPLY_MAX = 0xff
};
@@ -3082,6 +3082,13 @@ struct iwl4965_beacon_notif {
__le32 ibss_mgr_status;
} __packed;
+struct iwlagn_beacon_notif {
+ struct iwlagn_tx_resp beacon_notify_hdr;
+ __le32 low_tsf;
+ __le32 high_tsf;
+ __le32 ibss_mgr_status;
+} __packed;
+
/*
* REPLY_TX_BEACON = 0x91 (command, has simple generic response)
*/
@@ -4369,6 +4376,11 @@ int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/
+/*
+ * Minimum slot time in TU
+ */
+#define IWL_MIN_SLOT_TIME 20
+
/**
* struct iwl_wipan_slot
* @width: Time in TU
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index efbde1f..4ad8938 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -219,15 +219,12 @@ int iwlcore_init_geos(struct iwl_priv *priv)
if (!is_channel_valid(ch))
continue;
- if (is_channel_a_band(ch))
- sband = &priv->bands[IEEE80211_BAND_5GHZ];
- else
- sband = &priv->bands[IEEE80211_BAND_2GHZ];
+ sband = &priv->bands[ch->band];
geo_ch = &sband->channels[sband->n_channels++];
geo_ch->center_freq =
- ieee80211_channel_to_frequency(ch->channel);
+ ieee80211_channel_to_frequency(ch->channel, ch->band);
geo_ch->max_power = ch->max_power_avg;
geo_ch->max_antenna_gain = 0xff;
geo_ch->hw_value = ch->channel;
@@ -1161,6 +1158,8 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
int ret;
s8 prev_tx_power;
+ bool defer;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
lockdep_assert_held(&priv->mutex);
@@ -1188,10 +1187,15 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
if (!iwl_is_ready_rf(priv))
return -EIO;
- /* scan complete use tx_power_next, need to be updated */
+	/* scan complete and commit_rxon use the tx_power_next value;
+	 * it always needs to be updated for the newest request */
priv->tx_power_next = tx_power;
- if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
- IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
+
+ /* do not set tx power when scanning or channel changing */
+ defer = test_bit(STATUS_SCANNING, &priv->status) ||
+ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ if (defer && !force) {
+ IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
return 0;
}
@@ -1403,9 +1407,10 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *tmp, *ctx = NULL;
int err;
+ enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- vif->type, vif->addr);
+ viftype, vif->addr);
mutex_lock(&priv->mutex);
@@ -1429,7 +1434,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
continue;
}
- if (!(possible_modes & BIT(vif->type)))
+ if (!(possible_modes & BIT(viftype)))
continue;
/* have maybe usable context w/o interface */
@@ -1675,7 +1680,6 @@ void iwl_clear_traffic_stats(struct iwl_priv *priv)
{
memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
- priv->led_tpt = 0;
}
/*
@@ -1768,7 +1772,6 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
stats->data_cnt++;
stats->data_bytes += len;
}
- iwl_leds_background(priv);
}
EXPORT_SYMBOL(iwl_update_stats);
#endif
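The new band argument to ieee80211_channel_to_frequency() is needed because 2.4 GHz and 5 GHz channel numbers overlap, so the frequency can no longer be derived from the channel number alone. A simplified sketch of the mapping (the authoritative version, with its 4.9 GHz special cases, lives in net/wireless):

	static int chan_to_freq_sketch(int chan, enum ieee80211_band band)
	{
		if (band == IEEE80211_BAND_2GHZ)
			return chan == 14 ? 2484 : 2407 + chan * 5;
		/* 5 GHz: e.g. channel 36 maps to 5180 MHz */
		return 5000 + chan * 5;
	}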
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index a347437..e0ec170 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -227,8 +227,6 @@ struct iwl_lib_ops {
struct iwl_led_ops {
int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
- int (*on)(struct iwl_priv *priv);
- int (*off)(struct iwl_priv *priv);
};
/* NIC specific ops */
@@ -307,7 +305,6 @@ struct iwl_base_params {
u16 led_compensation;
const bool broken_powersave;
int chain_noise_num_beacons;
- const bool supports_idle;
bool adv_thermal_throttle;
bool support_ct_kill_exit;
const bool support_wimax_coexist;
@@ -366,6 +363,7 @@ struct iwl_ht_params {
* @adv_pm: advance power management
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
+ * @iq_invert: I/Q inversion
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -415,6 +413,7 @@ struct iwl_cfg {
const bool adv_pm;
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
+ const bool iq_invert;
};
/***************************
@@ -494,18 +493,6 @@ static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
__le16 fc, u16 len)
{
- struct traffic_stats *stats;
-
- if (is_tx)
- stats = &priv->tx_stats;
- else
- stats = &priv->rx_stats;
-
- if (ieee80211_is_data(fc)) {
- /* data */
- stats->data_bytes += len;
- }
- iwl_leds_background(priv);
}
#endif
/*****************************************************
@@ -755,6 +742,17 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
return priv->hw->wiphy->bands[band];
}
+static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
+{
+ return priv->cfg->bt_params &&
+ priv->cfg->bt_params->advanced_bt_coexist;
+}
+
+static inline bool iwl_bt_statistics(struct iwl_priv *priv)
+{
+ return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics;
+}
+
extern bool bt_coex_active;
extern bool bt_siso_mode;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index b80bf7d..f52bc04 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -290,7 +290,7 @@
/* HW REV */
-#define CSR_HW_REV_TYPE_MSK (0x00000F0)
+#define CSR_HW_REV_TYPE_MSK (0x00001F0)
#define CSR_HW_REV_TYPE_3945 (0x00000D0)
#define CSR_HW_REV_TYPE_4965 (0x0000000)
#define CSR_HW_REV_TYPE_5300 (0x0000020)
@@ -300,9 +300,15 @@
#define CSR_HW_REV_TYPE_1000 (0x0000060)
#define CSR_HW_REV_TYPE_6x00 (0x0000070)
#define CSR_HW_REV_TYPE_6x50 (0x0000080)
-#define CSR_HW_REV_TYPE_6x50g2 (0x0000084)
-#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0)
-#define CSR_HW_REV_TYPE_NONE (0x00000F0)
+#define CSR_HW_REV_TYPE_6150 (0x0000084)
+#define CSR_HW_REV_TYPE_6x05 (0x00000B0)
+#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
+#define CSR_HW_REV_TYPE_2x00 (0x0000100)
+#define CSR_HW_REV_TYPE_200 (0x0000110)
+#define CSR_HW_REV_TYPE_230 (0x0000120)
+#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
@@ -376,6 +382,8 @@
#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
#define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008)
+#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080)
+
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
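Widening CSR_HW_REV_TYPE_MSK from 0x00000F0 to 0x00001F0 is what makes room for the new type codes at 0x100-0x120: they set bit 8, which the old four-bit field would mask away, aliasing a 2x00 part to TYPE_4965. A quick demonstration of the aliasing:

	#include <stdio.h>

	int main(void)
	{
		unsigned old_msk = 0x00000F0, new_msk = 0x00001F0;
		unsigned hw_rev = 0x0000100; /* hypothetical 2x00-series value */

		/* old mask drops bit 8: 0x100 & 0x0F0 == 0x000 (TYPE_4965) */
		printf("old: %#05x\n", hw_rev & old_msk);
		/* new mask keeps it: 0x100 & 0x1F0 == 0x100 (TYPE_2x00) */
		printf("new: %#05x\n", hw_rev & new_msk);
		return 0;
	}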
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 6fe80b5..bc7a965 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -207,18 +207,19 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
return ret;
}
-#define BYTE1_MASK 0x000000ff;
-#define BYTE2_MASK 0x0000ffff;
-#define BYTE3_MASK 0x00ffffff;
static ssize_t iwl_dbgfs_sram_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- u32 val;
+ u32 val = 0;
char *buf;
ssize_t ret;
- int i;
+ int i = 0;
+ bool device_format = false;
+ int offset = 0;
+ int len = 0;
int pos = 0;
+ int sram;
struct iwl_priv *priv = file->private_data;
size_t bufsz;
@@ -230,35 +231,62 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
else
priv->dbgfs_sram_len = priv->ucode_data.len;
}
- bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
+ len = priv->dbgfs_sram_len;
+
+ if (len == -4) {
+ device_format = true;
+ len = 4;
+ }
+
+ bufsz = 50 + len * 4;
buf = kmalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
+
pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs_sram_len);
+ len);
pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
priv->dbgfs_sram_offset);
- for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
- val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
- priv->dbgfs_sram_len - i);
- if (i < 4) {
- switch (i) {
- case 1:
- val &= BYTE1_MASK;
- break;
- case 2:
- val &= BYTE2_MASK;
- break;
- case 3:
- val &= BYTE3_MASK;
- break;
- }
+
+ /* adjust sram address since reads are only on even u32 boundaries */
+ offset = priv->dbgfs_sram_offset & 0x3;
+ sram = priv->dbgfs_sram_offset & ~0x3;
+
+ /* read the first u32 from sram */
+ val = iwl_read_targ_mem(priv, sram);
+
+ for (; len; len--) {
+ /* put the address at the start of every line */
+ if (i == 0)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%08X: ", sram + offset);
+
+ if (device_format)
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%02x", (val >> (8 * (3 - offset))) & 0xff);
+ else
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "%02x ", (val >> (8 * offset)) & 0xff);
+
+ /* if all bytes processed, read the next u32 from sram */
+ if (++offset == 4) {
+ sram += 4;
+ offset = 0;
+ val = iwl_read_targ_mem(priv, sram);
}
- if (!(i % 16))
+
+ /* put in extra spaces and split lines for human readability */
+ if (++i == 16) {
+ i = 0;
pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+ } else if (!(i & 7)) {
+ pos += scnprintf(buf + pos, bufsz - pos, " ");
+ } else if (!(i & 3)) {
+ pos += scnprintf(buf + pos, bufsz - pos, " ");
+ }
}
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ if (i)
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -282,6 +310,9 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
priv->dbgfs_sram_offset = offset;
priv->dbgfs_sram_len = len;
+ } else if (sscanf(buf, "%x", &offset) == 1) {
+ priv->dbgfs_sram_offset = offset;
+ priv->dbgfs_sram_len = -4;
} else {
priv->dbgfs_sram_offset = 0;
priv->dbgfs_sram_len = 0;
@@ -668,29 +699,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[256];
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "allow blinking: %s\n",
- (priv->allow_blinking) ? "True" : "False");
- if (priv->allow_blinking) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "Led blinking rate: %u\n",
- priv->last_blink_rate);
- pos += scnprintf(buf + pos, bufsz - pos,
- "Last blink time: %lu\n",
- priv->last_blink_time);
- }
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -856,7 +864,6 @@ DEBUGFS_READ_FILE_OPS(channels);
DEBUGFS_READ_FILE_OPS(status);
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_FILE_OPS(led);
DEBUGFS_READ_FILE_OPS(thermal_throttling);
DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -1580,10 +1587,9 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
"last traffic notif: %d\n",
priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
- "sco_active: %d, kill_ack_mask: %x, "
- "kill_cts_mask: %x\n",
- priv->bt_ch_announce, priv->bt_sco_active,
- priv->kill_ack_mask, priv->kill_cts_mask);
+ "kill_ack_mask: %x, kill_cts_mask: %x\n",
+ priv->bt_ch_announce, priv->kill_ack_mask,
+ priv->kill_cts_mask);
pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
switch (priv->bt_traffic_load) {
@@ -1725,7 +1731,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
if (!priv->cfg->base_params->broken_powersave) {
DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
S_IWUSR | S_IRUSR);
@@ -1759,13 +1764,13 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
if (priv->cfg->base_params->ucode_tracing)
DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
- if (priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics)
+ if (iwl_bt_statistics(priv))
DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+ if (iwl_advanced_bt_coexist(priv))
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
if (priv->cfg->base_params->sensitivity_calib_by_driver)
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
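The rewritten SRAM dump reads one aligned u32 at a time and peels bytes out with shifts; the device_format path reverses the byte order so the dump matches the device's most-significant-byte-first view. A standalone sketch of the two extractions on a hypothetical word:

	#include <stdio.h>

	int main(void)
	{
		unsigned val = 0x11223344; /* hypothetical word read from SRAM */
		int offset;

		for (offset = 0; offset < 4; offset++) {
			/* host byte order, as the plain hexdump path prints */
			unsigned host = (val >> (8 * offset)) & 0xff;
			/* device format: most significant byte first */
			unsigned dev = (val >> (8 * (3 - offset))) & 0xff;
			printf("offset %d: host %02x dev %02x\n",
			       offset, host, dev);
		}
		return 0;
	}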
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 8dda678..ecfbef4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -34,6 +34,8 @@
#include <linux/pci.h> /* for struct pci_device_id */
#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
#include <net/ieee80211_radiotap.h>
#include "iwl-eeprom.h"
@@ -136,7 +138,7 @@ struct iwl_queue {
* space more than this */
int high_mark; /* high watermark, stop queue if free
* space less than this */
-} __packed;
+};
/* One for each TFD */
struct iwl_tx_info {
@@ -507,6 +509,7 @@ struct iwl_station_priv {
atomic_t pending_frames;
bool client;
bool asleep;
+ u8 max_agg_bufsize;
};
/**
@@ -995,7 +998,6 @@ struct reply_agg_tx_error_statistics {
u32 unknown;
};
-#ifdef CONFIG_IWLWIFI_DEBUGFS
/* management statistics */
enum iwl_mgmt_stats {
MANAGEMENT_ASSOC_REQ = 0,
@@ -1026,16 +1028,13 @@ enum iwl_ctrl_stats {
};
struct traffic_stats {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
u32 mgmt[MANAGEMENT_MAX];
u32 ctrl[CONTROL_MAX];
u32 data_cnt;
u64 data_bytes;
-};
-#else
-struct traffic_stats {
- u64 data_bytes;
-};
#endif
+};
/*
* iwl_switch_rxon: "channel switch" structure
@@ -1139,6 +1138,33 @@ struct iwl_force_reset {
*/
#define IWLAGN_EXT_BEACON_TIME_POS 22
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwlagn_init_notification_wait() with the appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and wait for the notification
+ * by calling iwlagn_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+ struct list_head list;
+
+ void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt);
+
+ u8 cmd;
+ bool triggered;
+};
+
enum iwl_rxon_context_id {
IWL_RXON_CTX_BSS,
IWL_RXON_CTX_PAN,
@@ -1310,11 +1336,6 @@ struct iwl_priv {
struct iwl_init_alive_resp card_alive_init;
struct iwl_alive_resp card_alive;
- unsigned long last_blink_time;
- u8 last_blink_rate;
- u8 allow_blinking;
- u64 led_tpt;
-
u16 active_rate;
u8 start_calib;
@@ -1463,6 +1484,17 @@ struct iwl_priv {
struct iwl_bt_notif_statistics delta_statistics_bt;
struct iwl_bt_notif_statistics max_delta_bt;
#endif
+
+ /* notification wait support */
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+
+ /* remain-on-channel offload support */
+ struct ieee80211_channel *hw_roc_channel;
+ struct delayed_work hw_roc_work;
+ enum nl80211_channel_type hw_roc_chantype;
+ int hw_roc_duration;
} _agn;
#endif
};
@@ -1472,7 +1504,6 @@ struct iwl_priv {
u8 bt_status;
u8 bt_traffic_load, last_bt_traffic_load;
bool bt_ch_announce;
- bool bt_sco_active;
bool bt_full_concurrent;
bool bt_ant_couple_ok;
__le32 kill_ack_mask;
@@ -1547,6 +1578,10 @@ struct iwl_priv {
bool hw_ready;
struct iwl_event_log event_log;
+
+ struct led_classdev led;
+ unsigned long blink_on, blink_off;
+ bool led_registered;
}; /*iwl_priv */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 9e6f313..98aa8af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -247,13 +247,26 @@ struct iwl_eeprom_enhanced_txpwr {
#define EEPROM_6050_TX_POWER_VERSION (4)
#define EEPROM_6050_EEPROM_VERSION (0x532)
-/* 6x50g2 Specific */
-#define EEPROM_6050G2_TX_POWER_VERSION (6)
-#define EEPROM_6050G2_EEPROM_VERSION (0x553)
+/* 6150 Specific */
+#define EEPROM_6150_TX_POWER_VERSION (6)
+#define EEPROM_6150_EEPROM_VERSION (0x553)
+
+/* 6x05 Specific */
+#define EEPROM_6005_TX_POWER_VERSION (6)
+#define EEPROM_6005_EEPROM_VERSION (0x709)
+
+/* 6x30 Specific */
+#define EEPROM_6030_TX_POWER_VERSION (6)
+#define EEPROM_6030_EEPROM_VERSION (0x709)
+
+/* 2x00 Specific */
+#define EEPROM_2000_TX_POWER_VERSION (6)
+#define EEPROM_2000_EEPROM_VERSION (0x805)
+
+/* 6x35 Specific */
+#define EEPROM_6035_TX_POWER_VERSION (6)
+#define EEPROM_6035_EEPROM_VERSION (0x753)
-/* 6x00g2 Specific */
-#define EEPROM_6000G2_TX_POWER_VERSION (6)
-#define EEPROM_6000G2_EEPROM_VERSION (0x709)
/* OTP */
/* lower blocks contain EEPROM image and calibration data */
@@ -264,6 +277,7 @@ struct iwl_eeprom_enhanced_txpwr {
#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
+#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
/* 2.4 GHz */
extern const u8 iwl_eeprom_band_1[14];
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index c373b53..e4b953d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -108,6 +108,7 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(REPLY_WIPAN_WEPKEY);
IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
+ IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
default:
return "UNKNOWN";
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 46ccdf4..074ad22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -48,31 +48,19 @@ module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
"1=On(RF On)/Off(RF Off), 2=blinking");
-static const struct {
- u16 tpt; /* Mb/s */
- u8 on_time;
- u8 off_time;
-} blink_tbl[] =
-{
- {300, 25, 25},
- {200, 40, 40},
- {100, 55, 55},
- {70, 65, 65},
- {50, 75, 75},
- {20, 85, 85},
- {10, 95, 95},
- {5, 110, 110},
- {1, 130, 130},
- {0, 167, 167},
- /* SOLID_ON */
- {-1, IWL_LED_SOLID, 0}
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+ { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+ { .throughput = 1 * 1024 - 1, .blink_time = 260 },
+ { .throughput = 5 * 1024 - 1, .blink_time = 220 },
+ { .throughput = 10 * 1024 - 1, .blink_time = 190 },
+ { .throughput = 20 * 1024 - 1, .blink_time = 170 },
+ { .throughput = 50 * 1024 - 1, .blink_time = 150 },
+ { .throughput = 70 * 1024 - 1, .blink_time = 130 },
+ { .throughput = 100 * 1024 - 1, .blink_time = 110 },
+ { .throughput = 200 * 1024 - 1, .blink_time = 80 },
+ { .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
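The thresholds are in Kbit/s (so 300 * 1024 - 1 sits just below 300 Mbit/s) and blink_time is the full on+off period in milliseconds. A sketch of the selection mac80211's throughput LED trigger is assumed to perform over this table:

static int pick_blink_time(int tpt_kbps)
{
	int i, blink_time = iwl_blink[0].blink_time;

	/* take the last entry whose threshold the throughput exceeds */
	for (i = 0; i < ARRAY_SIZE(iwl_blink); i++)
		if (tpt_kbps > iwl_blink[i].throughput)
			blink_time = iwl_blink[i].blink_time;
	return blink_time;	/* e.g. 90 Mbit/s (92160 Kbit/s) -> 130 ms */
}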
-#define IWL_1MB_RATE (128 * 1024)
-#define IWL_LED_THRESHOLD (16)
-#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
-#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
-
/*
* Adjust led blink rate to compensate on a MAC Clock difference on every HW
* Led blink rate analysis showed an average deviation of 0% on 3945,
@@ -97,133 +85,104 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
}
/* Set led pattern command */
-static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx)
+static int iwl_led_cmd(struct iwl_priv *priv,
+ unsigned long on,
+ unsigned long off)
{
struct iwl_led_cmd led_cmd = {
.id = IWL_LED_LINK,
.interval = IWL_DEF_LED_INTRVL
};
+ int ret;
- BUG_ON(idx > IWL_MAX_BLINK_TBL);
+ if (!test_bit(STATUS_READY, &priv->status))
+ return -EBUSY;
- IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n",
+ if (priv->blink_on == on && priv->blink_off == off)
+ return 0;
+
+ IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
priv->cfg->base_params->led_compensation);
- led_cmd.on =
- iwl_blink_compensation(priv, blink_tbl[idx].on_time,
+ led_cmd.on = iwl_blink_compensation(priv, on,
priv->cfg->base_params->led_compensation);
- led_cmd.off =
- iwl_blink_compensation(priv, blink_tbl[idx].off_time,
+ led_cmd.off = iwl_blink_compensation(priv, off,
priv->cfg->base_params->led_compensation);
- return priv->cfg->ops->led->cmd(priv, &led_cmd);
+ ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
+ if (!ret) {
+ priv->blink_on = on;
+ priv->blink_off = off;
+ }
+ return ret;
}
-int iwl_led_start(struct iwl_priv *priv)
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
{
- return priv->cfg->ops->led->on(priv);
-}
-EXPORT_SYMBOL(iwl_led_start);
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+ unsigned long on = 0;
-int iwl_led_associate(struct iwl_priv *priv)
-{
- IWL_DEBUG_LED(priv, "Associated\n");
- if (priv->cfg->led_mode == IWL_LED_BLINK)
- priv->allow_blinking = 1;
- priv->last_blink_time = jiffies;
+ if (brightness > 0)
+ on = IWL_LED_SOLID;
- return 0;
+ iwl_led_cmd(priv, on, 0);
}
-EXPORT_SYMBOL(iwl_led_associate);
-int iwl_led_disassociate(struct iwl_priv *priv)
+static int iwl_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
{
- priv->allow_blinking = 0;
+ struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
- return 0;
+ return iwl_led_cmd(priv, *delay_on, *delay_off);
}
-EXPORT_SYMBOL(iwl_led_disassociate);
-/*
- * calculate blink rate according to last second Tx/Rx activities
- */
-static int iwl_get_blink_rate(struct iwl_priv *priv)
-{
- int i;
- /* count both tx and rx traffic to be able to
- * handle traffic in either direction
- */
- u64 current_tpt = priv->tx_stats.data_bytes +
- priv->rx_stats.data_bytes;
- s64 tpt = current_tpt - priv->led_tpt;
-
- if (tpt < 0) /* wraparound */
- tpt = -tpt;
-
- IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n",
- (long long)tpt,
- (unsigned long long)current_tpt);
- priv->led_tpt = current_tpt;
-
- if (!priv->allow_blinking)
- i = IWL_MAX_BLINK_TBL;
- else
- for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
- if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
- break;
-
- IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
- return i;
-}
-
-/*
- * this function called from handler. Since setting Led command can
- * happen very frequent we postpone led command to be called from
- * REPLY handler so we know ucode is up
- */
-void iwl_leds_background(struct iwl_priv *priv)
+void iwl_leds_init(struct iwl_priv *priv)
{
- u8 blink_idx;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- priv->last_blink_time = 0;
- return;
- }
- if (iwl_is_rfkill(priv)) {
- priv->last_blink_time = 0;
- return;
+ int mode = led_mode;
+ int ret;
+
+ if (mode == IWL_LED_DEFAULT)
+ mode = priv->cfg->led_mode;
+
+ priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+ wiphy_name(priv->hw->wiphy));
+ priv->led.brightness_set = iwl_led_brightness_set;
+ priv->led.blink_set = iwl_led_blink_set;
+ priv->led.max_brightness = 1;
+
+ switch (mode) {
+ case IWL_LED_DEFAULT:
+ WARN_ON(1);
+ break;
+ case IWL_LED_BLINK:
+ priv->led.default_trigger =
+ ieee80211_create_tpt_led_trigger(priv->hw,
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+ iwl_blink, ARRAY_SIZE(iwl_blink));
+ break;
+ case IWL_LED_RF_STATE:
+ priv->led.default_trigger =
+ ieee80211_get_radio_led_name(priv->hw);
+ break;
}
- if (!priv->allow_blinking) {
- priv->last_blink_time = 0;
- if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
- priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
- iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
- }
+ ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
+ if (ret) {
+ kfree(priv->led.name);
return;
}
- if (!priv->last_blink_time ||
- !time_after(jiffies, priv->last_blink_time +
- msecs_to_jiffies(1000)))
- return;
-
- blink_idx = iwl_get_blink_rate(priv);
- /* call only if blink rate change */
- if (blink_idx != priv->last_blink_rate)
- iwl_led_pattern(priv, blink_idx);
-
- priv->last_blink_time = jiffies;
- priv->last_blink_rate = blink_idx;
+ priv->led_registered = true;
}
-EXPORT_SYMBOL(iwl_leds_background);
+EXPORT_SYMBOL(iwl_leds_init);
-void iwl_leds_init(struct iwl_priv *priv)
+void iwl_leds_exit(struct iwl_priv *priv)
{
- priv->last_blink_rate = 0;
- priv->last_blink_time = 0;
- priv->allow_blinking = 0;
- if (led_mode != IWL_LED_DEFAULT &&
- led_mode != priv->cfg->led_mode)
- priv->cfg->led_mode = led_mode;
+ if (!priv->led_registered)
+ return;
+
+ led_classdev_unregister(&priv->led);
+ kfree(priv->led.name);
}
-EXPORT_SYMBOL(iwl_leds_init);
+EXPORT_SYMBOL(iwl_leds_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 9079b33..101eef1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -31,23 +31,14 @@
struct iwl_priv;
#define IWL_LED_SOLID 11
-#define IWL_LED_NAME_LEN 31
#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)
-enum led_type {
- IWL_LED_TRG_TX,
- IWL_LED_TRG_RX,
- IWL_LED_TRG_ASSOC,
- IWL_LED_TRG_RADIO,
- IWL_LED_TRG_MAX,
-};
-
/*
* LED mode
- * IWL_LED_DEFAULT: use system default
+ * IWL_LED_DEFAULT: use device default
* IWL_LED_RF_STATE: turn LED on/off based on RF state
* LED ON = RF ON
* LED OFF = RF OFF
@@ -60,9 +51,6 @@ enum iwl_led_mode {
};
void iwl_leds_init(struct iwl_priv *priv);
-void iwl_leds_background(struct iwl_priv *priv);
-int iwl_led_start(struct iwl_priv *priv);
-int iwl_led_associate(struct iwl_priv *priv);
-int iwl_led_disassociate(struct iwl_priv *priv);
+void iwl_leds_exit(struct iwl_priv *priv);
#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
index bb1a742..e1ace3c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c
@@ -85,10 +85,9 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
channel->hw_value, changed);
- if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
- test_bit(STATUS_SCANNING, &priv->status))) {
+ if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
scan_active = 1;
- IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+ IWL_DEBUG_MAC80211(priv, "scan active\n");
}
if (changed & (IEEE80211_CONF_CHANGE_SMPS |
@@ -332,7 +331,6 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv,
{
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- iwl_led_disassociate(priv);
/*
* inform the ucode that there is no longer an
* association and that no more packets should be
@@ -520,8 +518,6 @@ void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
if (bss_conf->assoc) {
priv->timestamp = bss_conf->timestamp;
- iwl_led_associate(priv);
-
if (!iwl_is_rfkill(priv))
priv->cfg->ops->legacy->post_associate(priv);
} else
@@ -545,7 +541,6 @@ void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
memcpy(ctx->staging.bssid_addr,
bss_conf->bssid, ETH_ALEN);
memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwl_led_associate(priv);
priv->cfg->ops->legacy->config_ap(priv);
} else
iwl_set_no_assoc(priv, vif);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 1eec18d..1d1bf32 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -226,8 +226,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (iwl_advanced_bt_coexist(priv)) {
if (!priv->cfg->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
@@ -313,8 +312,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
else
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (iwl_advanced_bt_coexist(priv)) {
if (!priv->cfg->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
@@ -358,8 +356,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
if (priv->cfg->base_params->broken_powersave)
iwl_power_sleep_cam_cmd(priv, cmd);
- else if (priv->cfg->base_params->supports_idle &&
- priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+ else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
priv->cfg->ops->lib->tt_ops.tt_power_mode &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 12d9363..08f1bea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -257,8 +257,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
queue_work(priv->workqueue, &priv->scan_completed);
if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ iwl_advanced_bt_coexist(priv) &&
priv->bt_status != scan_notif->bt_status) {
if (scan_notif->bt_status) {
/* BT on */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 371abbf..adcef73 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -2517,7 +2517,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
ieee80211_wake_queues(priv->hw);
- priv->active_rate = IWL_RATES_MASK;
+ priv->active_rate = IWL_RATES_MASK_3945;
iwl_power_update_mode(priv, true);
@@ -2535,15 +2535,14 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
/* Configure Bluetooth device coexistence support */
priv->cfg->ops->hcmd->send_bt_config(priv);
+ set_bit(STATUS_READY, &priv->status);
+
/* Configure the adapter for unassociated operation */
iwl3945_commit_rxon(priv, ctx);
iwl3945_reg_txpower_periodic(priv);
- iwl_leds_init(priv);
-
IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- set_bit(STATUS_READY, &priv->status);
wake_up_interruptible(&priv->wait_command_queue);
return;
@@ -2861,16 +2860,13 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u32 extra;
u32 suspend_time = 100;
u32 scan_suspend_time = 100;
- unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- spin_lock_irqsave(&priv->lock, flags);
if (priv->is_internal_short_scan)
interval = 0;
else
interval = vif->bss_conf.beacon_int;
- spin_unlock_irqrestore(&priv->lock, flags);
scan->suspend_time = 0;
scan->max_out_time = cpu_to_le32(200 * 1024);
@@ -3170,8 +3166,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
* no need to poll the killswitch state anymore */
cancel_delayed_work(&priv->_3945.rfkill_poll);
- iwl_led_start(priv);
-
priv->is_open = 1;
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
@@ -3289,6 +3283,14 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS; the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS);
if (!static_key) {
@@ -3918,7 +3920,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
/* we create the 802.11 header and a zero-length SSID element */
@@ -3935,6 +3938,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
&priv->bands[IEEE80211_BAND_5GHZ];
+ iwl_leds_init(priv);
+
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
@@ -4194,6 +4199,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
set_bit(STATUS_EXIT_PENDING, &priv->status);
+ iwl_leds_exit(priv);
+
if (priv->mac80211_registered) {
ieee80211_unregister_hw(priv->hw);
priv->mac80211_registered = 0;
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index 5a49822..ed57e44 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -287,7 +287,8 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
return -EINVAL;
}
- freq = ieee80211_channel_to_frequency(umac_bss->channel);
+ freq = ieee80211_channel_to_frequency(umac_bss->channel,
+ band->band);
channel = ieee80211_get_channel(wiphy, freq);
signal = umac_bss->rssi * 100;
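This and the following conversions track a mac80211 API change: ieee80211_channel_to_frequency() now takes the band explicitly, since a bare channel number can be ambiguous between bands. Assumed shape of the updated helper:

	/* returns the center frequency in MHz, e.g. 2437 here */
	int freq = ieee80211_channel_to_frequency(6, IEEE80211_BAND_2GHZ);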
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a944893..9a57cf6 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -543,7 +543,10 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
switch (le32_to_cpu(complete->status)) {
case UMAC_ASSOC_COMPLETE_SUCCESS:
chan = ieee80211_get_channel(wiphy,
- ieee80211_channel_to_frequency(complete->channel));
+ ieee80211_channel_to_frequency(complete->channel,
+ complete->band == UMAC_BAND_2GHZ ?
+ IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ));
if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
/* Associated to an unallowed channel, disassociate. */
__iwm_invalidate_mlme_profile(iwm);
@@ -841,7 +844,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
goto err;
}
- freq = ieee80211_channel_to_frequency(umac_bss->channel);
+ freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
channel = ieee80211_get_channel(wiphy, freq);
signal = umac_bss->rssi * 100;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 698a1f7..30ef035 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -607,7 +607,8 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
/* No channel, no luck */
if (chan_no != -1) {
struct wiphy *wiphy = priv->wdev->wiphy;
- int freq = ieee80211_channel_to_frequency(chan_no);
+ int freq = ieee80211_channel_to_frequency(chan_no,
+ IEEE80211_BAND_2GHZ);
struct ieee80211_channel *channel =
ieee80211_get_channel(wiphy, freq);
@@ -1597,7 +1598,8 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev,
lbs_deb_enter(LBS_DEB_CFG80211);
survey->channel = ieee80211_get_channel(wiphy,
- ieee80211_channel_to_frequency(priv->channel));
+ ieee80211_channel_to_frequency(priv->channel,
+ IEEE80211_BAND_2GHZ));
ret = lbs_get_rssi(priv, &signal, &noise);
if (ret == 0) {
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 78c4da1..7e8a658 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -145,9 +145,13 @@ int lbs_update_hw_spec(struct lbs_private *priv)
if (priv->current_addr[0] == 0xff)
memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
- memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
- if (priv->mesh_dev)
- memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN);
+ if (!priv->copied_hwaddr) {
+ memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
+ if (priv->mesh_dev)
+ memcpy(priv->mesh_dev->dev_addr,
+ priv->current_addr, ETH_ALEN);
+ priv->copied_hwaddr = 1;
+ }
out:
lbs_deb_leave(LBS_DEB_CMD);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 18dd9a0..bc461eb 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -90,6 +90,7 @@ struct lbs_private {
void *card;
u8 fw_ready;
u8 surpriseremoved;
+ u8 setup_fw_on_resume;
int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
void (*reset_card) (struct lbs_private *priv);
int (*enter_deep_sleep) (struct lbs_private *priv);
@@ -101,6 +102,7 @@ struct lbs_private {
u32 fwcapinfo;
u16 regioncode;
u8 current_addr[ETH_ALEN];
+ u8 copied_hwaddr;
/* Command download */
u8 dnld_sent;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 0060023..f6c2cd66 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -20,10 +20,8 @@
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
-#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
-#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/spi/libertas_spi.h>
#include <linux/spi/spi.h>
@@ -34,6 +32,12 @@
#include "dev.h"
#include "if_spi.h"
+struct if_spi_packet {
+ struct list_head list;
+ u16 blen;
+ u8 buffer[0] __attribute__((aligned(4)));
+};
+
struct if_spi_card {
struct spi_device *spi;
struct lbs_private *priv;
@@ -51,18 +55,36 @@ struct if_spi_card {
unsigned long spu_reg_delay;
/* Handles all SPI communication (except for FW load) */
- struct task_struct *spi_thread;
- int run_thread;
-
- /* Used to wake up the spi_thread */
- struct semaphore spi_ready;
- struct semaphore spi_thread_terminated;
+ struct workqueue_struct *workqueue;
+ struct work_struct packet_work;
u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
+
+ /* A buffer of incoming packets from libertas core.
+ * Since we can't sleep in hw_host_to_card, we have to buffer
+ * them. */
+ struct list_head cmd_packet_list;
+ struct list_head data_packet_list;
+
+ /* Protects cmd_packet_list and data_packet_list */
+ spinlock_t buffer_lock;
};
static void free_if_spi_card(struct if_spi_card *card)
{
+ struct list_head *cursor, *next;
+ struct if_spi_packet *packet;
+
+ list_for_each_safe(cursor, next, &card->cmd_packet_list) {
+ packet = container_of(cursor, struct if_spi_packet, list);
+ list_del(&packet->list);
+ kfree(packet);
+ }
+ list_for_each_safe(cursor, next, &card->data_packet_list) {
+ packet = container_of(cursor, struct if_spi_packet, list);
+ list_del(&packet->list);
+ kfree(packet);
+ }
spi_set_drvdata(card->spi, NULL);
kfree(card);
}
@@ -622,7 +644,7 @@ out:
/*
* SPI Transfer Thread
*
- * The SPI thread handles all SPI transfers, so there is no need for a lock.
+ * The SPI worker handles all SPI transfers, so there is no need for a lock.
*/
/* Move a command from the card to the host */
@@ -742,6 +764,40 @@ out:
return err;
}
+/* Move data or a command from the host to the card. */
+static void if_spi_h2c(struct if_spi_card *card,
+ struct if_spi_packet *packet, int type)
+{
+ int err = 0;
+ u16 int_type, port_reg;
+
+ switch (type) {
+ case MVMS_DAT:
+ int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
+ port_reg = IF_SPI_DATA_RDWRPORT_REG;
+ break;
+ case MVMS_CMD:
+ int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
+ port_reg = IF_SPI_CMD_RDWRPORT_REG;
+ break;
+ default:
+ lbs_pr_err("can't transfer buffer of type %d\n", type);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Write the data to the card */
+ err = spu_write(card, port_reg, packet->buffer, packet->blen);
+ if (err)
+ goto out;
+
+out:
+ kfree(packet);
+
+ if (err)
+ lbs_pr_err("%s: error %d\n", __func__, err);
+}
+
/* Inform the host about a card event */
static void if_spi_e2h(struct if_spi_card *card)
{
@@ -766,71 +822,88 @@ out:
lbs_pr_err("%s: error %d\n", __func__, err);
}
-static int lbs_spi_thread(void *data)
+static void if_spi_host_to_card_worker(struct work_struct *work)
{
int err;
- struct if_spi_card *card = data;
+ struct if_spi_card *card;
u16 hiStatus;
+ unsigned long flags;
+ struct if_spi_packet *packet;
- while (1) {
- /* Wait to be woken up by one of two things. First, our ISR
- * could tell us that something happened on the WLAN.
- * Secondly, libertas could call hw_host_to_card with more
- * data, which we might be able to send.
- */
- do {
- err = down_interruptible(&card->spi_ready);
- if (!card->run_thread) {
- up(&card->spi_thread_terminated);
- do_exit(0);
- }
- } while (err == -EINTR);
+ card = container_of(work, struct if_spi_card, packet_work);
- /* Read the host interrupt status register to see what we
- * can do. */
- err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
- &hiStatus);
- if (err) {
- lbs_pr_err("I/O error\n");
+ lbs_deb_enter(LBS_DEB_SPI);
+
+ /* Read the host interrupt status register to see what we
+ * can do. */
+ err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
+ &hiStatus);
+ if (err) {
+ lbs_pr_err("I/O error\n");
+ goto err;
+ }
+
+ if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
+ err = if_spi_c2h_cmd(card);
+ if (err)
goto err;
- }
+ }
+ if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
+ err = if_spi_c2h_data(card);
+ if (err)
+ goto err;
+ }
- if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
- err = if_spi_c2h_cmd(card);
- if (err)
- goto err;
- }
- if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
- err = if_spi_c2h_data(card);
- if (err)
- goto err;
+ /* workaround: in PS mode, the card does not set the Command
+ * Download Ready bit, but it sets TX Download Ready. */
+ if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
+ (card->priv->psstate != PS_STATE_FULL_POWER &&
+ (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
+ /* This means two things. First of all,
+ * if there was a previous command sent, the card has
+ * successfully received it.
+ * Secondly, it is now ready to download another
+ * command.
+ */
+ lbs_host_to_card_done(card->priv);
+
+ /* Do we have any command packets from the host to
+ * send? */
+ packet = NULL;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ if (!list_empty(&card->cmd_packet_list)) {
+ packet = (struct if_spi_packet *)(card->
+ cmd_packet_list.next);
+ list_del(&packet->list);
}
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
- /* workaround: in PS mode, the card does not set the Command
- * Download Ready bit, but it sets TX Download Ready. */
- if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
- (card->priv->psstate != PS_STATE_FULL_POWER &&
- (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
- lbs_host_to_card_done(card->priv);
+ if (packet)
+ if_spi_h2c(card, packet, MVMS_CMD);
+ }
+ if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
+ /* Do we have any data packets from the host to
+ * send? */
+ packet = NULL;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ if (!list_empty(&card->data_packet_list)) {
+ packet = (struct if_spi_packet *)(card->
+ data_packet_list.next);
+ list_del(&packet->list);
}
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
- if (hiStatus & IF_SPI_HIST_CARD_EVENT)
- if_spi_e2h(card);
+ if (packet)
+ if_spi_h2c(card, packet, MVMS_DAT);
+ }
+ if (hiStatus & IF_SPI_HIST_CARD_EVENT)
+ if_spi_e2h(card);
err:
- if (err)
- lbs_pr_err("%s: got error %d\n", __func__, err);
- }
-}
+ if (err)
+ lbs_pr_err("%s: got error %d\n", __func__, err);
-/* Block until lbs_spi_thread thread has terminated */
-static void if_spi_terminate_spi_thread(struct if_spi_card *card)
-{
- /* It would be nice to use kthread_stop here, but that function
- * can't wake threads waiting for a semaphore. */
- card->run_thread = 0;
- up(&card->spi_ready);
- down(&card->spi_thread_terminated);
+ lbs_deb_leave(LBS_DEB_SPI);
}
/*
@@ -842,18 +915,40 @@ static int if_spi_host_to_card(struct lbs_private *priv,
u8 type, u8 *buf, u16 nb)
{
int err = 0;
+ unsigned long flags;
struct if_spi_card *card = priv->card;
+ struct if_spi_packet *packet;
+ u16 blen;
lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
- nb = ALIGN(nb, 4);
+ if (nb == 0) {
+ lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
+ err = -EINVAL;
+ goto out;
+ }
+ blen = ALIGN(nb, 4);
+ packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
+ if (!packet) {
+ err = -ENOMEM;
+ goto out;
+ }
+ packet->blen = blen;
+ memcpy(packet->buffer, buf, nb);
+ memset(packet->buffer + nb, 0, blen - nb);
switch (type) {
case MVMS_CMD:
- err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb);
+ priv->dnld_sent = DNLD_CMD_SENT;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ list_add_tail(&packet->list, &card->cmd_packet_list);
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
break;
case MVMS_DAT:
- err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb);
+ priv->dnld_sent = DNLD_DATA_SENT;
+ spin_lock_irqsave(&card->buffer_lock, flags);
+ list_add_tail(&packet->list, &card->data_packet_list);
+ spin_unlock_irqrestore(&card->buffer_lock, flags);
break;
default:
lbs_pr_err("can't transfer buffer of type %d", type);
@@ -861,6 +956,9 @@ static int if_spi_host_to_card(struct lbs_private *priv,
break;
}
+ /* Queue spi xfer work */
+ queue_work(card->workqueue, &card->packet_work);
+out:
lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
return err;
}
@@ -869,13 +967,14 @@ static int if_spi_host_to_card(struct lbs_private *priv,
* Host Interrupts
*
* Service incoming interrupts from the WLAN device. We can't sleep here, so
- * don't try to talk on the SPI bus, just wake up the SPI thread.
+ * don't try to talk on the SPI bus, just queue the SPI xfer work.
*/
static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
{
struct if_spi_card *card = dev_id;
- up(&card->spi_ready);
+ queue_work(card->workqueue, &card->packet_work);
+
return IRQ_HANDLED;
}
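Distilled, the new producer/consumer pattern looks like this (a sketch reusing the names from the hunks above, not additional driver code): atomic contexts only queue, and the worker alone touches the SPI bus.

static void queue_tx_packet(struct if_spi_card *card,
			    struct if_spi_packet *packet)
{
	unsigned long flags;

	/* producer side: safe in IRQ/atomic context */
	spin_lock_irqsave(&card->buffer_lock, flags);
	list_add_tail(&packet->list, &card->data_packet_list);
	spin_unlock_irqrestore(&card->buffer_lock, flags);

	/* the worker drains the list and may sleep on the bus */
	queue_work(card->workqueue, &card->packet_work);
}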
@@ -883,56 +982,26 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
* SPI callbacks
*/
-static int __devinit if_spi_probe(struct spi_device *spi)
+static int if_spi_init_card(struct if_spi_card *card)
{
- struct if_spi_card *card;
- struct lbs_private *priv = NULL;
- struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
- int err = 0, i;
+ struct spi_device *spi = card->spi;
+ int err, i;
u32 scratch;
- struct sched_param param = { .sched_priority = 1 };
const struct firmware *helper = NULL;
const struct firmware *mainfw = NULL;
lbs_deb_enter(LBS_DEB_SPI);
- if (!pdata) {
- err = -EINVAL;
- goto out;
- }
-
- if (pdata->setup) {
- err = pdata->setup(spi);
- if (err)
- goto out;
- }
-
- /* Allocate card structure to represent this specific device */
- card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
- if (!card) {
- err = -ENOMEM;
- goto out;
- }
- spi_set_drvdata(spi, card);
- card->pdata = pdata;
- card->spi = spi;
- card->prev_xfer_time = jiffies;
-
- sema_init(&card->spi_ready, 0);
- sema_init(&card->spi_thread_terminated, 0);
-
- /* Initialize the SPI Interface Unit */
- err = spu_init(card, pdata->use_dummy_writes);
+ err = spu_init(card, card->pdata->use_dummy_writes);
if (err)
- goto free_card;
+ goto out;
err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
if (err)
- goto free_card;
+ goto out;
- /* Firmware load */
err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
if (err)
- goto free_card;
+ goto out;
if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
lbs_deb_spi("Firmware is already loaded for "
"Marvell WLAN 802.11 adapter\n");
@@ -946,7 +1015,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
lbs_pr_err("Unsupported chip_id: 0x%02x\n",
card->card_id);
err = -ENODEV;
- goto free_card;
+ goto out;
}
err = lbs_get_firmware(&card->spi->dev, NULL, NULL,
@@ -954,7 +1023,7 @@ static int __devinit if_spi_probe(struct spi_device *spi)
&mainfw);
if (err) {
lbs_pr_err("failed to find firmware (%d)\n", err);
- goto free_card;
+ goto out;
}
lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
@@ -966,15 +1035,68 @@ static int __devinit if_spi_probe(struct spi_device *spi)
spi->max_speed_hz);
err = if_spi_prog_helper_firmware(card, helper);
if (err)
- goto free_card;
+ goto out;
err = if_spi_prog_main_firmware(card, mainfw);
if (err)
- goto free_card;
+ goto out;
lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
}
err = spu_set_interrupt_mode(card, 0, 1);
if (err)
+ goto out;
+
+out:
+ if (helper)
+ release_firmware(helper);
+ if (mainfw)
+ release_firmware(mainfw);
+
+ lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
+
+ return err;
+}
+
+static int __devinit if_spi_probe(struct spi_device *spi)
+{
+ struct if_spi_card *card;
+ struct lbs_private *priv = NULL;
+ struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+ int err = 0;
+
+ lbs_deb_enter(LBS_DEB_SPI);
+
+ if (!pdata) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (pdata->setup) {
+ err = pdata->setup(spi);
+ if (err)
+ goto out;
+ }
+
+ /* Allocate card structure to represent this specific device */
+ card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
+ if (!card) {
+ err = -ENOMEM;
+ goto teardown;
+ }
+ spi_set_drvdata(spi, card);
+ card->pdata = pdata;
+ card->spi = spi;
+ card->prev_xfer_time = jiffies;
+
+ INIT_LIST_HEAD(&card->cmd_packet_list);
+ INIT_LIST_HEAD(&card->data_packet_list);
+ spin_lock_init(&card->buffer_lock);
+
+ /* Initialize the SPI Interface Unit */
+
+ /* Firmware load */
+ err = if_spi_init_card(card);
+ if (err)
goto free_card;
/* Register our card with libertas.
@@ -993,27 +1115,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
priv->fw_ready = 1;
/* Initialize interrupt handling stuff. */
- card->run_thread = 1;
- card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread");
- if (IS_ERR(card->spi_thread)) {
- card->run_thread = 0;
- err = PTR_ERR(card->spi_thread);
- lbs_pr_err("error creating SPI thread: err=%d\n", err);
- goto remove_card;
- }
- if (sched_setscheduler(card->spi_thread, SCHED_FIFO, &param))
- lbs_pr_err("Error setting scheduler, using default.\n");
+ card->workqueue = create_workqueue("libertas_spi");
+ INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
err = request_irq(spi->irq, if_spi_host_interrupt,
IRQF_TRIGGER_FALLING, "libertas_spi", card);
if (err) {
lbs_pr_err("can't get host irq line-- request_irq failed\n");
- goto terminate_thread;
+ goto terminate_workqueue;
}
- /* poke the IRQ handler so that we don't miss the first interrupt */
- up(&card->spi_ready);
-
/* Start the card.
* This will call register_netdev, and we'll start
* getting interrupts... */
@@ -1028,18 +1139,16 @@ static int __devinit if_spi_probe(struct spi_device *spi)
release_irq:
free_irq(spi->irq, card);
-terminate_thread:
- if_spi_terminate_spi_thread(card);
-remove_card:
+terminate_workqueue:
+ flush_workqueue(card->workqueue);
+ destroy_workqueue(card->workqueue);
lbs_remove_card(priv); /* will call free_netdev */
free_card:
free_if_spi_card(card);
+teardown:
+ if (pdata->teardown)
+ pdata->teardown(spi);
out:
- if (helper)
- release_firmware(helper);
- if (mainfw)
- release_firmware(mainfw);
-
lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
return err;
}
@@ -1056,7 +1165,8 @@ static int __devexit libertas_spi_remove(struct spi_device *spi)
lbs_remove_card(priv); /* will call free_netdev */
free_irq(spi->irq, card);
- if_spi_terminate_spi_thread(card);
+ flush_workqueue(card->workqueue);
+ destroy_workqueue(card->workqueue);
if (card->pdata->teardown)
card->pdata->teardown(spi);
free_if_spi_card(card);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 6836a6d..ca8149c 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -539,6 +539,43 @@ static int lbs_thread(void *data)
return 0;
}
+/**
+ * @brief This function gets the HW spec from the firmware and sets
+ * some basic parameters.
+ *
+ * @param priv A pointer to struct lbs_private structure
+ * @return 0 or -1
+ */
+static int lbs_setup_firmware(struct lbs_private *priv)
+{
+ int ret = -1;
+ s16 curlevel = 0, minlevel = 0, maxlevel = 0;
+
+ lbs_deb_enter(LBS_DEB_FW);
+
+ /* Read MAC address from firmware */
+ memset(priv->current_addr, 0xff, ETH_ALEN);
+ ret = lbs_update_hw_spec(priv);
+ if (ret)
+ goto done;
+
+ /* Read power levels if available */
+ ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
+ if (ret == 0) {
+ priv->txpower_cur = curlevel;
+ priv->txpower_min = minlevel;
+ priv->txpower_max = maxlevel;
+ }
+
+ /* Send cmd to FW to enable 11D function */
+ ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
+
+ lbs_set_mac_control(priv);
+done:
+ lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
+ return ret;
+}
+
int lbs_suspend(struct lbs_private *priv)
{
int ret;
@@ -584,47 +621,13 @@ int lbs_resume(struct lbs_private *priv)
lbs_pr_err("deep sleep activation failed: %d\n", ret);
}
- lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
- return ret;
-}
-EXPORT_SYMBOL_GPL(lbs_resume);
-
-/**
- * @brief This function gets the HW spec from the firmware and sets
- * some basic parameters.
- *
- * @param priv A pointer to struct lbs_private structure
- * @return 0 or -1
- */
-static int lbs_setup_firmware(struct lbs_private *priv)
-{
- int ret = -1;
- s16 curlevel = 0, minlevel = 0, maxlevel = 0;
-
- lbs_deb_enter(LBS_DEB_FW);
-
- /* Read MAC address from firmware */
- memset(priv->current_addr, 0xff, ETH_ALEN);
- ret = lbs_update_hw_spec(priv);
- if (ret)
- goto done;
-
- /* Read power levels if available */
- ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel);
- if (ret == 0) {
- priv->txpower_cur = curlevel;
- priv->txpower_min = minlevel;
- priv->txpower_max = maxlevel;
- }
+ if (priv->setup_fw_on_resume)
+ ret = lbs_setup_firmware(priv);
- /* Send cmd to FW to enable 11D function */
- ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1);
-
- lbs_set_mac_control(priv);
-done:
lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
return ret;
}
+EXPORT_SYMBOL_GPL(lbs_resume);
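The new setup_fw_on_resume flag lets an interface driver ask for the firmware re-initialization above when its hardware loses state across suspend. A hypothetical opt-in from a card driver's probe path:

	/* card power is cut during suspend, so re-read the MAC address,
	 * tx power levels and 11D setting on resume (assumption: set
	 * once after lbs_add_card()) */
	priv->setup_fw_on_resume = 1;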
/**
* This function handles the timeout of command sending.
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 454f045..5d39b28 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -943,7 +943,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
switch (action) {
case IEEE80211_AMPDU_TX_START:
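The new buf_size argument reports the peer's reorder-buffer size from the ADDBA response. hwsim can ignore it, but a real driver is expected to cap its aggregate length accordingly; compare the max_agg_bufsize field added to struct iwl_station_priv earlier in this series. A sketch of the assumed semantics:

static u8 agg_frame_limit(u8 driver_max, u8 buf_size)
{
	/* never aggregate more frames than the peer can reorder */
	return min(driver_max, buf_size);
}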
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 9ecf840..af4f2c6 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -232,6 +232,9 @@ struct mwl8k_priv {
struct completion firmware_loading_complete;
};
+#define MAX_WEP_KEY_LEN 13
+#define NUM_WEP_KEYS 4
+
/* Per interface specific private data */
struct mwl8k_vif {
struct list_head list;
@@ -242,8 +245,21 @@ struct mwl8k_vif {
/* Non AMPDU sequence number assigned by driver. */
u16 seqno;
+
+ /* Saved WEP keys */
+ struct {
+ u8 enabled;
+ u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN];
+ } wep_key_conf[NUM_WEP_KEYS];
+
+ /* BSSID */
+ u8 bssid[ETH_ALEN];
+
+ /* A flag to indicate whether HW crypto is enabled for this BSSID */
+ bool is_hw_crypto_enabled;
};
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
+#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8))
struct mwl8k_sta {
/* Index into station database. Returned by UPDATE_STADB. */
@@ -337,6 +353,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
+#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */
#define MWL8K_CMD_UPDATE_STADB 0x1123
static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
@@ -375,6 +392,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(SET_RATEADAPT_MODE);
MWL8K_CMDNAME(BSS_START);
MWL8K_CMDNAME(SET_NEW_STN);
+ MWL8K_CMDNAME(UPDATE_ENCRYPTION);
MWL8K_CMDNAME(UPDATE_STADB);
default:
snprintf(buf, bufsize, "0x%x", cmd);
@@ -715,10 +733,12 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos)
skb_pull(skb, sizeof(*tr) - hdrlen);
}
-static inline void mwl8k_add_dma_header(struct sk_buff *skb)
+static void
+mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad)
{
struct ieee80211_hdr *wh;
int hdrlen;
+ int reqd_hdrlen;
struct mwl8k_dma_data *tr;
/*
@@ -730,11 +750,13 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
wh = (struct ieee80211_hdr *)skb->data;
hdrlen = ieee80211_hdrlen(wh->frame_control);
- if (hdrlen != sizeof(*tr))
- skb_push(skb, sizeof(*tr) - hdrlen);
+ reqd_hdrlen = sizeof(*tr);
+
+ if (hdrlen != reqd_hdrlen)
+ skb_push(skb, reqd_hdrlen - hdrlen);
if (ieee80211_is_data_qos(wh->frame_control))
- hdrlen -= 2;
+ hdrlen -= IEEE80211_QOS_CTL_LEN;
tr = (struct mwl8k_dma_data *)skb->data;
if (wh != &tr->wh)
@@ -747,9 +769,52 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
* payload". That is, everything except for the 802.11 header.
* This includes all crypto material including the MIC.
*/
- tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr));
+ tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad);
}
+static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *wh;
+ struct ieee80211_tx_info *tx_info;
+ struct ieee80211_key_conf *key_conf;
+ int data_pad;
+
+ wh = (struct ieee80211_hdr *)skb->data;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+
+ key_conf = NULL;
+ if (ieee80211_is_data(wh->frame_control))
+ key_conf = tx_info->control.hw_key;
+
+ /*
+ * Make sure the packet header is in the DMA header format (4-address
+ * without QoS). The necessary crypto padding between the header and the
+ * payload has already been provided by mac80211, but mac80211 does not
+ * add tail padding when HW crypto is enabled.
+ *
+ * We have the following trailer padding requirements:
+ * - WEP: 4 trailer bytes (ICV)
+ * - TKIP: 12 trailer bytes (8 MIC + 4 ICV)
+ * - CCMP: 8 trailer bytes (MIC)
+ */
+ data_pad = 0;
+ if (key_conf != NULL) {
+ switch (key_conf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ data_pad = 4;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ data_pad = 12;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ data_pad = 8;
+ break;
+ }
+ }
+ mwl8k_add_dma_header(skb, data_pad);
+}
/*
* Packet reception for 88w8366 AP firmware.
@@ -778,6 +843,13 @@ struct mwl8k_rxd_8366_ap {
#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
+/* 8366 AP rx_status bits */
+#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
+#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
+#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
+#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
+
static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
{
struct mwl8k_rxd_8366_ap *rxd = _rxd;
@@ -834,10 +906,16 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
} else {
status->band = IEEE80211_BAND_2GHZ;
}
- status->freq = ieee80211_channel_to_frequency(rxd->channel);
+ status->freq = ieee80211_channel_to_frequency(rxd->channel,
+ status->band);
*qos = rxd->qos_control;
+ if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
+ (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
+ (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
return le16_to_cpu(rxd->pkt_len);
}
@@ -876,6 +954,11 @@ struct mwl8k_rxd_sta {
#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
+#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR 0x04
+/* ICV=0 or MIC=1 */
+#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE 0x08
+/* Key is uploaded only in failure case */
+#define MWL8K_STA_RX_CTRL_KEY_INDEX 0x30
static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
{
@@ -931,9 +1014,13 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
} else {
status->band = IEEE80211_BAND_2GHZ;
}
- status->freq = ieee80211_channel_to_frequency(rxd->channel);
+ status->freq = ieee80211_channel_to_frequency(rxd->channel,
+ status->band);
*qos = rxd->qos_control;
+ if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) &&
+ (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE))
+ status->flag |= RX_FLAG_MMIC_ERROR;
return le16_to_cpu(rxd->pkt_len);
}
@@ -1092,9 +1179,25 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
ieee80211_queue_work(hw, &priv->finalize_join_worker);
}
+static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list,
+ u8 *bssid)
+{
+ struct mwl8k_vif *mwl8k_vif;
+
+ list_for_each_entry(mwl8k_vif,
+ vif_list, list) {
+ if (memcmp(bssid, mwl8k_vif->bssid,
+ ETH_ALEN) == 0)
+ return mwl8k_vif;
+ }
+
+ return NULL;
+}
+
static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
{
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = NULL;
struct mwl8k_rx_queue *rxq = priv->rxq + index;
int processed;
@@ -1104,6 +1207,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
void *rxd;
int pkt_len;
struct ieee80211_rx_status status;
+ struct ieee80211_hdr *wh;
__le16 qos;
skb = rxq->buf[rxq->head].skb;
@@ -1130,8 +1234,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
rxq->rxd_count--;
- skb_put(skb, pkt_len);
- mwl8k_remove_dma_header(skb, qos);
+ wh = &((struct mwl8k_dma_data *)skb->data)->wh;
/*
* Check for a pending join operation. Save a
@@ -1141,6 +1244,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
if (mwl8k_capture_bssid(priv, (void *)skb->data))
mwl8k_save_beacon(hw, skb);
+ if (ieee80211_has_protected(wh->frame_control)) {
+
+ /* Check if hw crypto has been enabled for
+ * this bss. If so, set the status flags
+ * accordingly.
+ */
+ mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list,
+ wh->addr1);
+
+ if (mwl8k_vif != NULL &&
+ mwl8k_vif->is_hw_crypto_enabled) {
+ /*
+ * When the firmware encounters an
+ * MMIC error, the payload is
+ * dropped and only the 32-byte
+ * mwl8k firmware header is sent
+ * to the host.
+ *
+ * We need to add four bytes of
+ * key information; in it,
+ * mac80211 expects keyidx set to
+ * 0 to trigger countermeasures
+ * for the MMIC failure.
+ */
+ if (status.flag & RX_FLAG_MMIC_ERROR) {
+ struct mwl8k_dma_data *tr;
+ tr = (struct mwl8k_dma_data *)skb->data;
+ memset((void *)&(tr->data), 0, 4);
+ pkt_len += 4;
+ }
+
+ if (!ieee80211_is_auth(wh->frame_control))
+ status.flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_MMIC_STRIPPED;
+ }
+ }
+
+ skb_put(skb, pkt_len);
+ mwl8k_remove_dma_header(skb, qos);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
ieee80211_rx_irqsafe(hw, skb);
@@ -1443,7 +1586,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
else
qos = 0;
- mwl8k_add_dma_header(skb);
+ if (priv->ap_fw)
+ mwl8k_encapsulate_tx_frame(skb);
+ else
+ mwl8k_add_dma_header(skb, 0);
+
wh = &((struct mwl8k_dma_data *)skb->data)->wh;
tx_info = IEEE80211_SKB_CB(skb);
@@ -3099,6 +3246,274 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
}
/*
+ * CMD_UPDATE_ENCRYPTION.
+ */
+
+#define MAX_ENCR_KEY_LENGTH 16
+#define MIC_KEY_LENGTH 8
+
+struct mwl8k_cmd_update_encryption {
+ struct mwl8k_cmd_pkt header;
+
+ __le32 action;
+ __le32 reserved;
+ __u8 mac_addr[6];
+ __u8 encr_type;
+
+} __attribute__((packed));
+
+struct mwl8k_cmd_set_key {
+ struct mwl8k_cmd_pkt header;
+
+ __le32 action;
+ __le32 reserved;
+ __le16 length;
+ __le16 key_type_id;
+ __le32 key_info;
+ __le32 key_id;
+ __le16 key_len;
+ __u8 key_material[MAX_ENCR_KEY_LENGTH];
+ __u8 tkip_tx_mic_key[MIC_KEY_LENGTH];
+ __u8 tkip_rx_mic_key[MIC_KEY_LENGTH];
+ __le16 tkip_rsc_low;
+ __le32 tkip_rsc_high;
+ __le16 tkip_tsc_low;
+ __le32 tkip_tsc_high;
+ __u8 mac_addr[6];
+} __attribute__((packed));
+
+enum {
+ MWL8K_ENCR_ENABLE,
+ MWL8K_ENCR_SET_KEY,
+ MWL8K_ENCR_REMOVE_KEY,
+ MWL8K_ENCR_SET_GROUP_KEY,
+};
+
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP 0
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE 1
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP 4
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED 7
+#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES 8
+
+enum {
+ MWL8K_ALG_WEP,
+ MWL8K_ALG_TKIP,
+ MWL8K_ALG_CCMP,
+};
+
+#define MWL8K_KEY_FLAG_TXGROUPKEY 0x00000004
+#define MWL8K_KEY_FLAG_PAIRWISE 0x00000008
+#define MWL8K_KEY_FLAG_TSC_VALID 0x00000040
+#define MWL8K_KEY_FLAG_WEP_TXKEY 0x01000000
+#define MWL8K_KEY_FLAG_MICKEY_VALID 0x02000000
+
+static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 *addr,
+ u8 encr_type)
+{
+ struct mwl8k_cmd_update_encryption *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE);
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+ cmd->encr_type = encr_type;
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd,
+ u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->length = cpu_to_le16(sizeof(*cmd) -
+ offsetof(struct mwl8k_cmd_set_key, length));
+ cmd->key_id = cpu_to_le32(key->keyidx);
+ cmd->key_len = cpu_to_le16(key->keylen);
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP);
+ if (key->keyidx == 0)
+ cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY);
+
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP);
+ cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+ : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+ cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID
+ | MWL8K_KEY_FLAG_TSC_VALID);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP);
+ cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE)
+ : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ struct mwl8k_cmd_set_key *cmd;
+ int rc;
+ int keymlen;
+ u32 action;
+ u8 idx;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+ if (rc < 0)
+ goto done;
+
+ idx = key->keyidx;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ action = MWL8K_ENCR_SET_KEY;
+ else
+ action = MWL8K_ENCR_SET_GROUP_KEY;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ if (!mwl8k_vif->wep_key_conf[idx].enabled) {
+ memcpy(mwl8k_vif->wep_key_conf[idx].key, key,
+ sizeof(*key) + key->keylen);
+ mwl8k_vif->wep_key_conf[idx].enabled = 1;
+ }
+
+ keymlen = 0;
+ action = MWL8K_ENCR_SET_KEY;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ keymlen = key->keylen;
+ break;
+ default:
+ rc = -ENOTSUPP;
+ goto done;
+ }
+
+ memcpy(cmd->key_material, key->key, keymlen);
+ cmd->action = cpu_to_le32(action);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u8 *addr,
+ struct ieee80211_key_conf *key)
+{
+ struct mwl8k_cmd_set_key *cmd;
+ int rc;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ rc = mwl8k_encryption_set_cmd_info(cmd, addr, key);
+ if (rc < 0)
+ goto done;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
+
+ cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+done:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd_param,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ int rc = 0;
+ u8 encr_type;
+ u8 *addr;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (sta == NULL)
+ addr = hw->wiphy->perm_addr;
+ else
+ addr = sta->addr;
+
+ if (cmd_param == SET_KEY) {
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key);
+ if (rc)
+ goto out;
+
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40)
+ || (key->cipher == WLAN_CIPHER_SUITE_WEP104))
+ encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP;
+ else
+ encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED;
+
+ rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr,
+ encr_type);
+ if (rc)
+ goto out;
+
+ mwl8k_vif->is_hw_crypto_enabled = true;
+
+ } else {
+ rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key);
+
+ if (rc)
+ goto out;
+
+ mwl8k_vif->is_hw_crypto_enabled = false;
+
+ }
+out:
+ return rc;
+}
+
+/*
* CMD_UPDATE_STADB.
*/
struct ewc_ht_info {
@@ -3469,6 +3884,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
mwl8k_vif->vif = vif;
mwl8k_vif->macid = macid;
mwl8k_vif->seqno = 0;
+ memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN);
+ mwl8k_vif->is_hw_crypto_enabled = false;
/* Set the mac address. */
mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
@@ -3866,18 +4283,27 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw,
{
struct mwl8k_priv *priv = hw->priv;
int ret;
+ int i;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct ieee80211_key_conf *key;
if (!priv->ap_fw) {
ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
if (ret >= 0) {
MWL8K_STA(sta)->peer_id = ret;
- return 0;
+ ret = 0;
}
- return ret;
+ } else {
+ ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta);
}
- return mwl8k_cmd_set_new_stn_add(hw, vif, sta);
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key);
+ if (mwl8k_vif->wep_key_conf[i].enabled)
+ mwl8k_set_key(hw, SET_KEY, vif, sta, key);
+ }
+ return ret;
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3932,7 +4358,8 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
static int
mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
switch (action) {
case IEEE80211_AMPDU_RX_START:
@@ -3955,6 +4382,7 @@ static const struct ieee80211_ops mwl8k_ops = {
.bss_info_changed = mwl8k_bss_info_changed,
.prepare_multicast = mwl8k_prepare_multicast,
.configure_filter = mwl8k_configure_filter,
+ .set_key = mwl8k_set_key,
.set_rts_threshold = mwl8k_set_rts_threshold,
.sta_add = mwl8k_sta_add,
.sta_remove = mwl8k_sta_remove,
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 54ca49a..2725f3c 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -46,7 +46,7 @@
* These indirect registers work with busy bits,
* and we will try maximal REGISTER_BUSY_COUNT times to access
* the register while taking a REGISTER_BUSY_DELAY us delay
- * between each attampt. When the busy bit is still set at that time,
+ * between each attempt. When the busy bit is still set at that time,
* the access attempt is considered to have failed,
* and we will print an error.
*/
@@ -305,9 +305,7 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
* Enable synchronisation.
*/
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -647,6 +645,11 @@ static void rt2400pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
+ /*
+ * Allow the tbtt tasklet to be scheduled.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -708,6 +711,11 @@ static void rt2400pci_stop_queue(struct data_queue *queue)
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+ /*
+ * Wait for possibly running tbtt tasklets.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
break;
default:
break;
@@ -963,9 +971,9 @@ static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev)
static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_OFF) ||
- (state == STATE_RADIO_IRQ_OFF_ISR);
+ int mask = (state == STATE_RADIO_IRQ_OFF);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -974,12 +982,20 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+ /*
+ * Enable tasklets.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
}
/*
* Only toggle the interrupts bits we are going to use.
* Non-checked interrupt bits are disabled by default.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -987,6 +1003,17 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished before
+ * disabling the interrupts.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ }
}
static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1059,9 +1086,7 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2400pci_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt2400pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -1183,8 +1208,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
/*
* Enable beaconing again.
*/
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -1289,57 +1312,71 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
}
}
-static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
+ unsigned long flags;
+ u32 reg;
/*
- * Handle interrupts, walk through all bits
- * and run the tasks, the bits are checked in order of
- * priority.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /*
- * 1 - Beacon timer expired interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
- rt2x00lib_beacondone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- /*
- * 2 - Rx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_RXDONE))
- rt2x00pci_rxdone(rt2x00dev);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- /*
- * 3 - Atim ring transmit done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
- rt2400pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2400pci_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ u32 reg;
+ unsigned long flags;
/*
- * 4 - Priority ring transmit done interrupt.
+ * Handle all tx queues.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
- rt2400pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2400pci_txdone(rt2x00dev, QID_ATIM);
+ rt2400pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2400pci_txdone(rt2x00dev, QID_AC_VI);
/*
- * 5 - Tx ring transmit done interrupt.
+ * Enable all TXDONE interrupts again.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
- rt2400pci_txdone(rt2x00dev, QID_AC_VI);
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
- return IRQ_HANDLED;
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
+
+static void rt2400pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2400pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
}
static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg;
+ u32 reg, mask;
+ unsigned long flags;
/*
* Get the interrupt sources & saved to local variable.
@@ -1354,14 +1391,44 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- /* Store irqvalues for use in the interrupt thread. */
- rt2x00dev->irqvalue[0] = reg;
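+ /*
+ * A set bit in CSR8 masks (disables) the corresponding interrupt,
+ * so the pending sources read from CSR7 double as the disable mask.
+ */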
+ mask = reg;
- /* Disable interrupts, will be enabled again in the interrupt thread. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
+ /*
+ * Schedule tasklets for interrupt handling.
+ */
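+ /* Beacon handling is timing critical; use the high priority tasklet. */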
+ if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
- return IRQ_WAKE_THREAD;
+ if (rt2x00_get_field32(reg, CSR7_RXDONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ /*
+ * Mask out all txdone interrupts.
+ */
+ rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+ }
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now;
+ * the tasklets will re-enable the appropriate interrupts.
+ */
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ reg |= mask;
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ return IRQ_HANDLED;
}
/*
@@ -1655,7 +1722,9 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
.irq_handler = rt2400pci_interrupt,
- .irq_handler_thread = rt2400pci_interrupt_thread,
+ .txstatus_tasklet = rt2400pci_txstatus_tasklet,
+ .tbtt_tasklet = rt2400pci_tbtt_tasklet,
+ .rxdone_tasklet = rt2400pci_rxdone_tasklet,
.probe_hw = rt2400pci_probe_hw,
.initialize = rt2x00pci_initialize,
.uninitialize = rt2x00pci_uninitialize,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index a9ff26a..3ef1fb4 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -311,9 +311,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
* Enable synchronisation.
*/
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -737,6 +735,11 @@ static void rt2500pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
break;
case QID_BEACON:
+ /*
+ * Allow the tbtt tasklet to be scheduled.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
rt2x00_set_field32(&reg, CSR14_TBCN, 1);
@@ -798,6 +801,11 @@ static void rt2500pci_stop_queue(struct data_queue *queue)
rt2x00_set_field32(&reg, CSR14_TBCN, 0);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
+
+ /*
+ * Wait for possibly running tbtt tasklets.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
break;
default:
break;
@@ -1118,9 +1126,9 @@ static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev)
static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_OFF) ||
- (state == STATE_RADIO_IRQ_OFF_ISR);
+ int mask = (state == STATE_RADIO_IRQ_OFF);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -1129,12 +1137,20 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2x00pci_register_read(rt2x00dev, CSR7, &reg);
rt2x00pci_register_write(rt2x00dev, CSR7, reg);
+
+ /*
+ * Enable tasklets.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
}
/*
* Only toggle the interrupts bits we are going to use.
* Non-checked interrupt bits are disabled by default.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask);
rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask);
@@ -1142,6 +1158,16 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask);
rt2x00_set_field32(&reg, CSR8_RXDONE, mask);
rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ }
}
static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1214,9 +1240,7 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2500pci_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt2500pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -1337,8 +1361,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
/*
* Enable beaconing again.
*/
- rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
- rt2x00_set_field32(&reg, CSR14_TBCN, 1);
rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
@@ -1422,58 +1444,71 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
}
}
-static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance)
+static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
+ unsigned long flags;
+ u32 reg;
/*
- * Handle interrupts, walk through all bits
- * and run the tasks, the bits are checked in order of
- * priority.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /*
- * 1 - Beacon timer expired interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
- rt2x00lib_beacondone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- /*
- * 2 - Rx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_RXDONE))
- rt2x00pci_rxdone(rt2x00dev);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- /*
- * 3 - Atim ring transmit done interrupt.
- */
- if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
- rt2500pci_txdone(rt2x00dev, QID_ATIM);
+static void rt2500pci_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ u32 reg;
+ unsigned long flags;
/*
- * 4 - Priority ring transmit done interrupt.
+ * Handle all tx queues.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
- rt2500pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2500pci_txdone(rt2x00dev, QID_ATIM);
+ rt2500pci_txdone(rt2x00dev, QID_AC_VO);
+ rt2500pci_txdone(rt2x00dev, QID_AC_VI);
/*
- * 5 - Tx ring transmit done interrupt.
+ * Enable all TXDONE interrupts again.
*/
- if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
- rt2500pci_txdone(rt2x00dev, QID_AC_VI);
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+ rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- return IRQ_HANDLED;
+static void rt2500pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
+}
+
+static void rt2500pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
}
static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg;
+ u32 reg, mask;
+ unsigned long flags;
/*
* Get the interrupt sources & saved to local variable.
@@ -1488,14 +1523,42 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- /* Store irqvalues for use in the interrupt thread. */
- rt2x00dev->irqvalue[0] = reg;
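+ /*
+ * As in rt2400pci: set bits in CSR8 disable interrupts, so the
+ * pending sources can be reused directly as the disable mask.
+ */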
+ mask = reg;
- /* Disable interrupts, will be enabled again in the interrupt thread. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
+ /*
+ * Schedule tasklets for interrupt handling.
+ */
+ if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
- return IRQ_WAKE_THREAD;
+ if (rt2x00_get_field32(reg, CSR7_RXDONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) ||
+ rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) {
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ /*
+ * Mask out all txdone interrupts.
+ */
+ rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1);
+ rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1);
+ }
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now;
+ * the tasklets will re-enable the appropriate interrupts.
+ */
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+ rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
+ reg |= mask;
+ rt2x00pci_register_write(rt2x00dev, CSR8, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ return IRQ_HANDLED;
}
/*
@@ -1952,7 +2015,9 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
.irq_handler = rt2500pci_interrupt,
- .irq_handler_thread = rt2500pci_interrupt_thread,
+ .txstatus_tasklet = rt2500pci_txstatus_tasklet,
+ .tbtt_tasklet = rt2500pci_tbtt_tasklet,
+ .rxdone_tasklet = rt2500pci_rxdone_tasklet,
.probe_hw = rt2500pci_probe_hw,
.initialize = rt2x00pci_initialize,
.uninitialize = rt2x00pci_uninitialize,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 6b3b1de4..01f385d 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -478,9 +478,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
- rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
- rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
}
@@ -1056,9 +1054,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2500usb_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4c55e85..ec8159c 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -372,8 +372,12 @@
/*
* US_CYC_CNT
+ * BT_MODE_EN: Bluetooth mode enable
+ * CLOCK_CYCLE: Number of clock cycles per 1us.
+ * PCI:0x21, PCIE:0x7d, USB:0x1e
*/
#define US_CYC_CNT 0x02a4
+#define US_CYC_CNT_BT_MODE_EN FIELD32(0x00000100)
#define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff)
/*
@@ -1805,6 +1809,12 @@ struct mac_iveiv_entry {
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
/*
+ * RFCSR 31:
+ */
+#define RFCSR31_RX_AGC_FC FIELD8(0x1f)
+#define RFCSR31_RX_H20M FIELD8(0x20)
+
+/*
* RF registers
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 54917a2..c9bf074 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -818,8 +818,6 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
/*
* Enable beaconing again.
*/
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
@@ -831,8 +829,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
}
EXPORT_SYMBOL_GPL(rt2800_write_beacon);
-static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
- unsigned int beacon_base)
+static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
+ unsigned int beacon_base)
{
int i;
@@ -845,6 +843,33 @@ static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, beacon_base + i, 0);
}
+void rt2800_clear_beacon(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ u32 reg;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data;
+ * otherwise we might send out invalid data.
+ */
+ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ /*
+ * Clear beacon.
+ */
+ rt2800_clear_beacon_register(rt2x00dev,
+ HW_BEACON_OFFSET(entry->entry_idx));
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+}
+EXPORT_SYMBOL_GPL(rt2800_clear_beacon);
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -1005,7 +1030,7 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
memset(&wcid_entry, 0, sizeof(wcid_entry));
if (crypto->cmd == SET_KEY)
- memcpy(&wcid_entry, crypto->address, ETH_ALEN);
+ memcpy(wcid_entry.mac, crypto->address, ETH_ALEN);
rt2800_register_multiwrite(rt2x00dev, offset,
&wcid_entry, sizeof(wcid_entry));
}
@@ -1155,29 +1180,11 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
if (flags & CONFIG_UPDATE_TYPE) {
/*
- * Clear current synchronisation setup.
- */
- rt2800_clear_beacon(rt2x00dev,
- HW_BEACON_OFFSET(intf->beacon->entry_idx));
- /*
* Enable synchronisation.
*/
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
- (conf->sync == TSF_SYNC_ADHOC ||
- conf->sync == TSF_SYNC_AP_NONE));
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
- /*
- * Enable pre tbtt interrupt for beaconing modes
- */
- rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
- rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER,
- (conf->sync == TSF_SYNC_AP_NONE));
- rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
}
if (flags & CONFIG_UPDATE_MAC) {
@@ -2187,19 +2194,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
/*
* Clear all beacons
*/
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6);
- rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
+ rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30);
rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
+ } else if (rt2x00_is_pcie(rt2x00dev)) {
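+ /* 0x7d = 125 clock cycles per microsecond on PCIe devices. */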
+ rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
+ rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125);
+ rt2800_register_write(rt2x00dev, US_CYC_CNT, reg);
}
rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
@@ -2436,6 +2447,10 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
rt2800_bbp_write(rt2x00dev, 4, bbp);
+ rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40);
+ rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
+
rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
@@ -2510,7 +2525,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
- rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x60);
rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
@@ -2602,12 +2617,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
} else if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090)) {
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
+
rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
- rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
-
rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
@@ -2619,6 +2634,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
}
rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+
+ rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
+ rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
} else if (rt2x00_rt(rt2x00dev, RT3390)) {
rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
@@ -2670,10 +2689,11 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
- if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
+ if (!test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
}
rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
@@ -2686,6 +2706,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3090)) {
rt2800_bbp_read(rt2x00dev, 138, &bbp);
+ /* Turn off unused DAC1 and ADC1 to reduce power consumption */
rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
@@ -2719,10 +2740,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
}
- if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
+ if (rt2x00_rt(rt2x00dev, RT3070)) {
rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
- if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
- rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
else
rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
@@ -2810,10 +2830,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
- rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
/* Wait for DMA, ignore error */
@@ -2823,9 +2840,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
- rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
- rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
}
EXPORT_SYMBOL_GPL(rt2800_disable_radio);
@@ -3530,7 +3544,8 @@ EXPORT_SYMBOL_GPL(rt2800_get_tsf);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
int ret = 0;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index e3c995a..0c92d86 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -156,6 +156,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
void rt2800_txdone_entry(struct queue_entry *entry, u32 status);
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
+void rt2800_clear_beacon(struct queue_entry *entry);
extern const struct rt2x00debug rt2800_rt2x00debug;
@@ -198,7 +199,8 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
u64 rt2800_get_tsf(struct ieee80211_hw *hw);
int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index aa97971..8f4dfc3 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -200,11 +200,22 @@ static void rt2800pci_start_queue(struct data_queue *queue)
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
break;
case QID_BEACON:
+ /*
+ * Allow beacon tasklets to be scheduled for periodic
+ * beacon updates.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+ tasklet_enable(&rt2x00dev->pretbtt_tasklet);
+
rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+ rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
break;
default:
break;
@@ -250,6 +261,16 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+ rt2800_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+ rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+ rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+ /*
+ * Wait for tbtt tasklets to finish.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
+ tasklet_disable(&rt2x00dev->pretbtt_tasklet);
break;
default:
break;
@@ -397,9 +418,9 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_ON) ||
- (state == STATE_RADIO_IRQ_ON_ISR);
+ int mask = (state == STATE_RADIO_IRQ_ON);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -408,8 +429,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
if (state == STATE_RADIO_IRQ_ON) {
rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+ /*
+ * Enable tasklets. The beacon related tasklets are
+ * enabled when the beacon queue is started.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
+ tasklet_enable(&rt2x00dev->autowake_tasklet);
}
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
@@ -430,6 +460,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished before
+ * disabling the interrupts.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ tasklet_disable(&rt2x00dev->autowake_tasklet);
+ }
}
static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -475,39 +516,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
{
- u32 reg;
-
- rt2800_disable_radio(rt2x00dev);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
-
- rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
- rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
- rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
- rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+ if (rt2x00_is_soc(rt2x00dev)) {
+ rt2800_disable_radio(rt2x00dev);
+ rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+ rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
+ }
}
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- /*
- * Always put the device to sleep (even when we intend to wakeup!)
- * if the device is booting and wasn't asleep it will return
- * failure when attempting to wakeup.
- */
- rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
-
if (state == STATE_AWAKE) {
- rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
+ rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02);
rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
+ } else if (state == STATE_SLEEP) {
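+ /* Invalidate any stale MCU mailbox state before requesting sleep. */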
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff);
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff);
+ rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01);
}
return 0;
@@ -538,9 +563,7 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt2800pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -652,6 +675,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -726,45 +755,60 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
}
}
-static void rt2800pci_txstatus_tasklet(unsigned long data)
+static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- rt2800pci_txdone((struct rt2x00_dev *)data);
-}
-
-static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance)
-{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
+ unsigned long flags;
+ u32 reg;
/*
- * 1 - Pre TBTT interrupt.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
- rt2x00lib_pretbtt(rt2x00dev);
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+ rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 1);
+ rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- /*
- * 2 - Beacondone interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
- rt2x00lib_beacondone(rt2x00dev);
+static void rt2800pci_txstatus_tasklet(unsigned long data)
+{
+ rt2800pci_txdone((struct rt2x00_dev *)data);
/*
- * 3 - Rx ring done interrupt.
+ * No need to enable the tx status interrupt here as we always
+ * leave it enabled to minimize the possibility of a tx status
+ * register overflow. See comment in interrupt handler.
*/
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
- rt2x00pci_rxdone(rt2x00dev);
+}
- /*
- * 4 - Auto wakeup interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
- rt2800pci_wakeup(rt2x00dev);
+static void rt2800pci_pretbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_pretbtt(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
+static void rt2800pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
- return IRQ_HANDLED;
+static void rt2800pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
+
+static void rt2800pci_autowake_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2800pci_wakeup(rt2x00dev);
+ rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
}
static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
@@ -810,8 +854,8 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg;
- irqreturn_t ret = IRQ_HANDLED;
+ u32 reg, mask;
+ unsigned long flags;
/* Read status and ACK all interrupts */
rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
@@ -823,38 +867,44 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
- rt2800pci_txstatus_interrupt(rt2x00dev);
+ /*
+ * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+ * for interrupts and interrupt masks we can just use the value of
+ * INT_SOURCE_CSR to create the interrupt mask.
+ */
+ mask = ~reg;
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT) ||
- rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT) ||
- rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE) ||
- rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) {
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+ rt2800pci_txstatus_interrupt(rt2x00dev);
/*
- * All other interrupts are handled in the interrupt thread.
- * Store irqvalue for use in the interrupt thread.
+ * Never disable the TX_FIFO_STATUS interrupt.
*/
- rt2x00dev->irqvalue[0] = reg;
+ rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ }
- /*
- * Disable interrupts, will be enabled again in the
- * interrupt thread.
- */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
- /*
- * Leave the TX_FIFO_STATUS interrupt enabled to not lose any
- * tx status reports.
- */
- rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
- rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
- ret = IRQ_WAKE_THREAD;
- }
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
- return ret;
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+ tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now;
+ * the tasklets will re-enable the appropriate interrupts.
+ */
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+ rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg &= mask;
+ rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ return IRQ_HANDLED;
}
/*
@@ -969,8 +1019,11 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.irq_handler = rt2800pci_interrupt,
- .irq_handler_thread = rt2800pci_interrupt_thread,
- .txstatus_tasklet = rt2800pci_txstatus_tasklet,
+ .txstatus_tasklet = rt2800pci_txstatus_tasklet,
+ .pretbtt_tasklet = rt2800pci_pretbtt_tasklet,
+ .tbtt_tasklet = rt2800pci_tbtt_tasklet,
+ .rxdone_tasklet = rt2800pci_rxdone_tasklet,
+ .autowake_tasklet = rt2800pci_autowake_tasklet,
.probe_hw = rt2800pci_probe_hw,
.get_firmware_name = rt2800pci_get_firmware_name,
.check_firmware = rt2800_check_firmware,
@@ -990,6 +1043,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.write_tx_desc = rt2800pci_write_tx_desc,
.write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
+ .clear_beacon = rt2800_clear_beacon,
.fill_rxdone = rt2800pci_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
.config_pairwise_key = rt2800_config_pairwise_key,
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index b97a4a5..5d91561 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -253,9 +253,7 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
@@ -486,6 +484,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -633,6 +637,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.write_tx_desc = rt2800usb_write_tx_desc,
.write_tx_data = rt2800usb_write_tx_data,
.write_beacon = rt2800_write_beacon,
+ .clear_beacon = rt2800_clear_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
.fill_rxdone = rt2800usb_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 84aaf39..39bc2fa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -368,6 +368,7 @@ struct rt2x00_intf {
* dedicated beacon entry.
*/
struct queue_entry *beacon;
+ bool enable_beacon;
/*
* Actions that needed rescheduling.
@@ -511,14 +512,13 @@ struct rt2x00lib_ops {
irq_handler_t irq_handler;
/*
- * Threaded Interrupt handlers.
- */
- irq_handler_t irq_handler_thread;
-
- /*
* TX status tasklet handler.
*/
void (*txstatus_tasklet) (unsigned long data);
+ void (*pretbtt_tasklet) (unsigned long data);
+ void (*tbtt_tasklet) (unsigned long data);
+ void (*rxdone_tasklet) (unsigned long data);
+ void (*autowake_tasklet) (unsigned long data);
/*
* Device init handlers.
@@ -573,6 +573,7 @@ struct rt2x00lib_ops {
struct txentry_desc *txdesc);
void (*write_beacon) (struct queue_entry *entry,
struct txentry_desc *txdesc);
+ void (*clear_beacon) (struct queue_entry *entry);
int (*get_tx_data_len) (struct queue_entry *entry);
/*
@@ -788,10 +789,12 @@ struct rt2x00_dev {
* - Open ap interface count.
* - Open sta interface count.
* - Association count.
+ * - Beaconing enabled count.
*/
unsigned int intf_ap_count;
unsigned int intf_sta_count;
unsigned int intf_associated;
+ unsigned int intf_beaconing;
/*
* Link quality
@@ -857,6 +860,13 @@ struct rt2x00_dev {
*/
struct ieee80211_low_level_stats low_level_stats;
+ /**
+ * Work queue for all work which should not be placed
+ * on the mac80211 workqueue (because of dependencies
+ * between various work structures).
+ */
+ struct workqueue_struct *workqueue;
+
/*
* Scheduled work.
* NOTE: intf_work will use ieee80211_iterate_active_interfaces()
@@ -887,12 +897,6 @@ struct rt2x00_dev {
const struct firmware *fw;
/*
- * Interrupt values, stored between interrupt service routine
- * and interrupt thread routine.
- */
- u32 irqvalue[2];
-
- /*
* FIFO for storing tx status reports between isr and tasklet.
*/
DECLARE_KFIFO_PTR(txstatus_fifo, u32);
@@ -901,6 +905,15 @@ struct rt2x00_dev {
* Tasklet for processing tx status reports (rt2800pci).
*/
struct tasklet_struct txstatus_tasklet;
+ struct tasklet_struct pretbtt_tasklet;
+ struct tasklet_struct tbtt_tasklet;
+ struct tasklet_struct rxdone_tasklet;
+ struct tasklet_struct autowake_tasklet;
+
+ /*
+ * Protect the interrupt mask register.
+ */
+ spinlock_t irqmask_lock;
};
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 9597a03..9de9dbe 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -121,7 +121,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
return;
if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
- rt2x00queue_update_beacon(rt2x00dev, vif, true);
+ rt2x00queue_update_beacon(rt2x00dev, vif);
}
static void rt2x00lib_intf_scheduled(struct work_struct *work)
@@ -174,7 +174,13 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac,
vif->type != NL80211_IFTYPE_WDS)
return;
- rt2x00queue_update_beacon(rt2x00dev, vif, true);
+ /*
+ * Update the beacon without locking. This is safe on PCI devices
+ * as they only update the beacon periodically here. This should
+ * never be called for USB devices.
+ */
+ WARN_ON(rt2x00_is_usb(rt2x00dev));
+ rt2x00queue_update_beacon_locked(rt2x00dev, vif);
}
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -183,9 +189,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
return;
/* send buffered bc/mc frames out for every bssid */
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_bc_buffer_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_bc_buffer_iter,
+ rt2x00dev);
/*
* Devices with pre tbtt interrupt don't need to update the beacon
* here as they will fetch the next beacon directly prior to
@@ -195,9 +201,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
return;
/* fetch next beacon */
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_beaconupdate_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_beaconupdate_iter,
+ rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
@@ -207,9 +213,9 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
return;
/* fetch next beacon */
- ieee80211_iterate_active_interfaces(rt2x00dev->hw,
- rt2x00lib_beaconupdate_iter,
- rt2x00dev);
+ ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw,
+ rt2x00lib_beaconupdate_iter,
+ rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
@@ -649,7 +655,10 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
const int channel, const int tx_power,
const int value)
{
- entry->center_freq = ieee80211_channel_to_frequency(channel);
+ /* XXX: this assumption about the band is wrong for 802.11j */
+ entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ entry->center_freq = ieee80211_channel_to_frequency(channel,
+ entry->band);
entry->hw_value = value;
entry->max_power = tx_power;
entry->max_antenna_gain = 0xff;
@@ -812,15 +821,29 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
GFP_KERNEL);
if (status)
return status;
+ }
- /* tasklet for processing the tx status reports. */
- if (rt2x00dev->ops->lib->txstatus_tasklet)
- tasklet_init(&rt2x00dev->txstatus_tasklet,
- rt2x00dev->ops->lib->txstatus_tasklet,
- (unsigned long)rt2x00dev);
-
+ /*
+ * Initialize tasklets if used by the driver. Tasklets are
+ * disabled until the interrupts are turned on; the driver is
+ * responsible for enabling them again.
+ */
+#define RT2X00_TASKLET_INIT(taskletname) \
+ if (rt2x00dev->ops->lib->taskletname) { \
+ tasklet_init(&rt2x00dev->taskletname, \
+ rt2x00dev->ops->lib->taskletname, \
+ (unsigned long)rt2x00dev); \
+ tasklet_disable(&rt2x00dev->taskletname); \
}
+ RT2X00_TASKLET_INIT(txstatus_tasklet);
+ RT2X00_TASKLET_INIT(pretbtt_tasklet);
+ RT2X00_TASKLET_INIT(tbtt_tasklet);
+ RT2X00_TASKLET_INIT(rxdone_tasklet);
+ RT2X00_TASKLET_INIT(autowake_tasklet);
+
+#undef RT2X00_TASKLET_INIT
+
/*
* Register HW.
*/
@@ -949,6 +972,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
int retval = -ENOMEM;
+ spin_lock_init(&rt2x00dev->irqmask_lock);
mutex_init(&rt2x00dev->csr_mutex);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -973,8 +997,15 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
BIT(NL80211_IFTYPE_WDS);
/*
- * Initialize configuration work.
+ * Initialize work.
*/
+ rt2x00dev->workqueue =
+ alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
+ if (!rt2x00dev->workqueue) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
/*
@@ -1033,6 +1064,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
+ destroy_workqueue(rt2x00dev->workqueue);
/*
* Free the tx status fifo.
@@ -1043,6 +1075,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
* Kill the tx status tasklet.
*/
tasklet_kill(&rt2x00dev->txstatus_tasklet);
+ tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+ tasklet_kill(&rt2x00dev->tbtt_tasklet);
+ tasklet_kill(&rt2x00dev->rxdone_tasklet);
+ tasklet_kill(&rt2x00dev->autowake_tasklet);
/*
* Uninitialize device.
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index a105c50..2d94cba 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -157,14 +157,30 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
bool local);
/**
- * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware
+ * rt2x00queue_update_beacon - Send new beacon from mac80211
+ * to hardware. Handles locking by itself (mutex).
* @rt2x00dev: Pointer to &struct rt2x00_dev.
* @vif: Interface for which the beacon should be updated.
- * @enable_beacon: Enable beaconing
*/
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_vif *vif,
- const bool enable_beacon);
+ struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_update_beacon_locked - Send new beacon from mac80211
+ * to hardware. Caller needs to ensure locking.
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be updated.
+ */
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif);
+
+/**
+ * rt2x00queue_clear_beacon - Clear beacon in hardware
+ * @rt2x00dev: Pointer to &struct rt2x00_dev.
+ * @vif: Interface for which the beacon should be cleared.
+ */
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif);
/**
* rt2x00queue_index_inc - Index incrementation function
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index bfda60e..c975b0a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -417,7 +417,8 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev)
!test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags))
return;
- schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+ ieee80211_queue_delayed_work(rt2x00dev->hw,
+ &link->watchdog_work, WATCHDOG_INTERVAL);
}
void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev)
@@ -441,7 +442,9 @@ static void rt2x00link_watchdog(struct work_struct *work)
rt2x00dev->ops->lib->watchdog(rt2x00dev);
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL);
+ ieee80211_queue_delayed_work(rt2x00dev->hw,
+ &link->watchdog_work,
+ WATCHDOG_INTERVAL);
}
void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index f3da051..6a66021 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -617,11 +617,47 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid);
/*
- * Update the beacon.
+ * Update the beacon. This is only required on USB devices. PCI
+ * devices fetch beacons periodically.
*/
- if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED))
- rt2x00queue_update_beacon(rt2x00dev, vif,
- bss_conf->enable_beacon);
+ if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
+ rt2x00queue_update_beacon(rt2x00dev, vif);
+
+ /*
+ * Start/stop beaconing.
+ */
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon && intf->enable_beacon) {
+ rt2x00queue_clear_beacon(rt2x00dev, vif);
+ rt2x00dev->intf_beaconing--;
+ intf->enable_beacon = false;
+
+ if (rt2x00dev->intf_beaconing == 0) {
+ /*
+ * Last beaconing interface disabled
+ * -> stop beacon queue.
+ */
+ mutex_lock(&intf->beacon_skb_mutex);
+ rt2x00queue_stop_queue(rt2x00dev->bcn);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ }
+ } else if (bss_conf->enable_beacon && !intf->enable_beacon) {
+ rt2x00dev->intf_beaconing++;
+ intf->enable_beacon = true;
+
+ if (rt2x00dev->intf_beaconing == 1) {
+ /*
+ * First beaconing interface enabled
+ * -> start beacon queue.
+ */
+ mutex_lock(&intf->beacon_skb_mutex);
+ rt2x00queue_start_queue(rt2x00dev->bcn);
+ mutex_unlock(&intf->beacon_skb_mutex);
+ }
+ }
+ }
/*
* When the association status has changed we must reset the link
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index ace0b66..4dd82b0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -160,10 +160,9 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
/*
* Register interrupt handler.
*/
- status = request_threaded_irq(rt2x00dev->irq,
- rt2x00dev->ops->lib->irq_handler,
- rt2x00dev->ops->lib->irq_handler_thread,
- IRQF_SHARED, rt2x00dev->name, rt2x00dev);
+ status = request_irq(rt2x00dev->irq,
+ rt2x00dev->ops->lib->irq_handler,
+ IRQF_SHARED, rt2x00dev->name, rt2x00dev);
if (status) {
ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
rt2x00dev->irq, status);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index ca82b3a..fa17c83 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -566,13 +566,10 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
return 0;
}
-int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_vif *vif,
- const bool enable_beacon)
+int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
{
struct rt2x00_intf *intf = vif_to_intf(vif);
- struct skb_frame_desc *skbdesc;
- struct txentry_desc txdesc;
if (unlikely(!intf->beacon))
return -ENOBUFS;
@@ -584,17 +581,36 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
*/
rt2x00queue_free_skb(intf->beacon);
- if (!enable_beacon) {
- rt2x00queue_stop_queue(intf->beacon->queue);
- mutex_unlock(&intf->beacon_skb_mutex);
- return 0;
- }
+ /*
+ * Clear beacon (single bssid devices don't need to clear the beacon
+ * since the beacon queue will get stopped anyway).
+ */
+ if (rt2x00dev->ops->lib->clear_beacon)
+ rt2x00dev->ops->lib->clear_beacon(intf->beacon);
+
+ mutex_unlock(&intf->beacon_skb_mutex);
+
+ return 0;
+}
+
+int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
+{
+ struct rt2x00_intf *intf = vif_to_intf(vif);
+ struct skb_frame_desc *skbdesc;
+ struct txentry_desc txdesc;
+
+ if (unlikely(!intf->beacon))
+ return -ENOBUFS;
+
+ /*
+ * Clean up the beacon skb.
+ */
+ rt2x00queue_free_skb(intf->beacon);
intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
- if (!intf->beacon->skb) {
- mutex_unlock(&intf->beacon_skb_mutex);
+ if (!intf->beacon->skb)
return -ENOMEM;
- }
/*
* Copy all TX descriptor information into txdesc,
@@ -611,13 +627,25 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
skbdesc->entry = intf->beacon;
/*
- * Send beacon to hardware and enable beacon genaration..
+ * Send beacon to hardware.
*/
rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
+ return 0;
+}
+
+int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_vif *vif)
+{
+ struct rt2x00_intf *intf = vif_to_intf(vif);
+ int ret;
+
+ mutex_lock(&intf->beacon_skb_mutex);
+ ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif);
mutex_unlock(&intf->beacon_skb_mutex);
- return 0;
+ return ret;
}
void rt2x00queue_for_each_entry(struct data_queue *queue,
@@ -885,7 +913,7 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
* The queue flush has failed...
*/
if (unlikely(!rt2x00queue_empty(queue)))
- WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid);
+ WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid);
/*
* Restore the queue to the previous status
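rt2x00queue_update_beacon() is split above into a _locked worker and a thin wrapper that takes beacon_skb_mutex, so code paths that already hold the mutex can refresh the beacon without deadlocking. The shape of that split, sketched with placeholder names:

#include <linux/mutex.h>

struct wdev {
	struct mutex beacon_mutex;
};

/* Caller must hold beacon_mutex. */
static int update_beacon_locked(struct wdev *d)
{
	/* rebuild and upload the beacon here */
	return 0;
}

static int update_beacon(struct wdev *d)
{
	int ret;

	mutex_lock(&d->beacon_mutex);
	ret = update_beacon_locked(d);
	mutex_unlock(&d->beacon_mutex);
	return ret;
}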
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index e8259ae..6f867ee 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -85,8 +85,6 @@ enum dev_state {
STATE_RADIO_OFF,
STATE_RADIO_IRQ_ON,
STATE_RADIO_IRQ_OFF,
- STATE_RADIO_IRQ_ON_ISR,
- STATE_RADIO_IRQ_OFF_ISR,
};
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 1a9937d..fbe735f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -227,7 +227,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
* Schedule the delayed work for reading the TX status
* from the device.
*/
- ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}
static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -320,7 +320,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
* Schedule the delayed work for reading the RX status
* from the device.
*/
- ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
@@ -429,7 +429,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue)
* Schedule the completion handler manually, when this
* worker function runs, it should cleanup the queue.
*/
- ieee80211_queue_work(queue->rt2x00dev->hw, completion);
+ queue_work(queue->rt2x00dev->workqueue, completion);
/*
* Wait for a little while to give the driver
@@ -453,7 +453,7 @@ static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
" invoke forced tx handler\n", queue->qid);
- ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
+ queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work);
}
void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
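All four call sites above move completion handling from mac80211's shared workqueue to a driver-owned rt2x00dev->workqueue, which the driver can flush and destroy on its own schedule. A sketch of the lifecycle of such a private queue (names are illustrative, not driver API):

#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *wq;
	struct work_struct txdone_work;
};

static void my_txdone_worker(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, txdone_work);

	/* read TX status from the device for d here */
}

static int my_dev_start(struct my_dev *d)
{
	d->wq = create_singlethread_workqueue("mydev");
	if (!d->wq)
		return -ENOMEM;
	INIT_WORK(&d->txdone_work, my_txdone_worker);
	return 0;
}

static void my_dev_completion_path(struct my_dev *d)
{
	queue_work(d->wq, &d->txdone_work);	/* defer to our own queue */
}

static void my_dev_stop(struct my_dev *d)
{
	flush_workqueue(d->wq);
	destroy_workqueue(d->wq);
}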
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 8de44dd..dd2164d 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -551,26 +551,14 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
- unsigned int beacon_base;
u32 reg;
if (flags & CONFIG_UPDATE_TYPE) {
/*
- * Clear current synchronisation setup.
- * For the Beacon base registers, we only need to clear
- * the first byte since that byte contains the VALID and OWNER
- * bits which (when set to 0) will invalidate the entire beacon.
- */
- beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
- rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
-
- /*
* Enable synchronisation.
*/
rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
}
@@ -1154,6 +1142,11 @@ static void rt61pci_start_queue(struct data_queue *queue)
rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
break;
case QID_BEACON:
+ /*
+ * Allow the tbtt tasklet to be scheduled.
+ */
+ tasklet_enable(&rt2x00dev->tbtt_tasklet);
+
rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
@@ -1233,6 +1226,11 @@ static void rt61pci_stop_queue(struct data_queue *queue)
rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
+ * Wait for possibly running tbtt tasklets.
+ */
+ tasklet_disable(&rt2x00dev->tbtt_tasklet);
break;
default:
break;
@@ -1719,9 +1717,9 @@ static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev)
static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_OFF) ||
- (state == STATE_RADIO_IRQ_OFF_ISR);
+ int mask = (state == STATE_RADIO_IRQ_OFF);
u32 reg;
+ unsigned long flags;
/*
* When interrupts are being enabled, the interrupt registers
@@ -1733,12 +1731,21 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, &reg);
rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg);
+
+ /*
+ * Enable tasklets.
+ */
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ tasklet_enable(&rt2x00dev->rxdone_tasklet);
+ tasklet_enable(&rt2x00dev->autowake_tasklet);
}
/*
* Only toggle the interrupts bits we are going to use.
* Non-checked interrupt bits are disabled by default.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask);
rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask);
@@ -1758,6 +1765,17 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask);
rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask);
rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ if (state == STATE_RADIO_IRQ_OFF) {
+ /*
+ * Ensure that all tasklets are finished.
+ */
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ tasklet_disable(&rt2x00dev->rxdone_tasklet);
+ tasklet_disable(&rt2x00dev->autowake_tasklet);
+ }
}
static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1833,9 +1851,7 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
rt61pci_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
rt61pci_toggle_irq(rt2x00dev, state);
break;
case STATE_DEEP_SLEEP:
@@ -2002,8 +2018,6 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
*/
rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -2014,6 +2028,32 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
entry->skb = NULL;
}
+static void rt61pci_clear_beacon(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ u32 reg;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
+ * Clear beacon.
+ */
+ rt2x00pci_register_write(rt2x00dev,
+ HW_BEACON_OFFSET(entry->entry_idx), 0);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
+}
+
/*
* RX control handlers
*/
@@ -2078,9 +2118,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
/*
- * FIXME: Legacy driver indicates that the frame does
- * contain the Michael Mic. Unfortunately, in rt2x00
- * the MIC seems to be missing completely...
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
*/
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
@@ -2211,61 +2250,80 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
-static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance)
+static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
{
- struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg = rt2x00dev->irqvalue[0];
- u32 reg_mcu = rt2x00dev->irqvalue[1];
+ unsigned long flags;
+ u32 reg;
/*
- * Handle interrupts, walk through all bits
- * and run the tasks, the bits are checked in order of
- * priority.
+ * Enable a single interrupt. The interrupt mask register
+ * access needs locking.
*/
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /*
- * 1 - Rx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
- rt2x00pci_rxdone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
- /*
- * 2 - Tx ring done interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
- rt61pci_txdone(rt2x00dev);
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
- /*
- * 3 - Handle MCU command done.
- */
- if (reg_mcu)
- rt2x00pci_register_write(rt2x00dev,
- M2H_CMD_DONE_CSR, 0xffffffff);
+static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
+ struct rt2x00_field32 irq_field)
+{
+ unsigned long flags;
+ u32 reg;
/*
- * 4 - MCU Autowakeup interrupt.
+ * Enable a single MCU interrupt. The interrupt mask register
+ * access needs locking.
*/
- if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
- rt61pci_wakeup(rt2x00dev);
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- /*
- * 5 - Beacon done interrupt.
- */
- if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
- rt2x00lib_beacondone(rt2x00dev);
+ rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+ rt2x00_set_field32(&reg, irq_field, 0);
+ rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
- /* Enable interrupts again. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_ON_ISR);
- return IRQ_HANDLED;
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+}
+
+static void rt61pci_txstatus_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt61pci_txdone(rt2x00dev);
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
+}
+
+static void rt61pci_tbtt_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00lib_beacondone(rt2x00dev);
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
+}
+
+static void rt61pci_rxdone_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt2x00pci_rxdone(rt2x00dev);
+ rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
}
+static void rt61pci_autowake_tasklet(unsigned long data)
+{
+ struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ rt61pci_wakeup(rt2x00dev);
+ rt2x00pci_register_write(rt2x00dev,
+ M2H_CMD_DONE_CSR, 0xffffffff);
+ rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP);
+}
static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
{
struct rt2x00_dev *rt2x00dev = dev_instance;
- u32 reg_mcu;
- u32 reg;
+ u32 reg_mcu, mask_mcu;
+ u32 reg, mask;
+ unsigned long flags;
/*
* Get the interrupt sources & saved to local variable.
@@ -2283,14 +2341,46 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
return IRQ_HANDLED;
- /* Store irqvalues for use in the interrupt thread. */
- rt2x00dev->irqvalue[0] = reg;
- rt2x00dev->irqvalue[1] = reg_mcu;
+ /*
+ * Schedule tasklets for interrupt handling.
+ */
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE))
+ tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
- /* Disable interrupts, will be enabled again in the interrupt thread. */
- rt2x00dev->ops->lib->set_device_state(rt2x00dev,
- STATE_RADIO_IRQ_OFF_ISR);
- return IRQ_WAKE_THREAD;
+ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE))
+ tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+ if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
+ tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+ /*
+ * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+ * for interrupts and interrupt masks we can just use the value of
+ * INT_SOURCE_CSR to create the interrupt mask.
+ */
+ mask = reg;
+ mask_mcu = reg_mcu;
+
+ /*
+ * Disable all interrupts for which a tasklet was scheduled right now;
+ * the tasklets will re-enable the appropriate interrupts.
+ */
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
+ rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+ reg |= mask;
+ rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
+
+ rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg);
+ reg |= mask_mcu;
+ rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg);
+
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+ return IRQ_HANDLED;
}
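Taken together, these hunks implement a simple handshake: the hard interrupt reads INT_SOURCE_CSR, schedules one tasklet per pending source, and masks exactly those sources under irqmask_lock; each tasklet unmasks its own source once its work is done. A condensed sketch of the scheme (register offsets, bit layout and the reg_read/reg_write/process_rx helpers are placeholders):

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#define INT_SOURCE	0x00	/* placeholder register offsets */
#define INT_MASK	0x04
#define INT_RXDONE	BIT(0)	/* bit set in INT_MASK == source disabled */

struct my_dev {
	spinlock_t irqmask_lock;
	struct tasklet_struct rxdone_tasklet;
};

static u32 reg_read(struct my_dev *d, u32 reg);		/* placeholders */
static void reg_write(struct my_dev *d, u32 reg, u32 val);
static void process_rx(struct my_dev *d);

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct my_dev *d = dev_id;
	unsigned long flags;
	u32 source;

	source = reg_read(d, INT_SOURCE);	/* read and ack */
	if (!source)
		return IRQ_NONE;

	if (source & INT_RXDONE)
		tasklet_schedule(&d->rxdone_tasklet);

	/* Mask the serviced sources; the tasklets unmask them later. */
	spin_lock_irqsave(&d->irqmask_lock, flags);
	reg_write(d, INT_MASK, reg_read(d, INT_MASK) | source);
	spin_unlock_irqrestore(&d->irqmask_lock, flags);

	return IRQ_HANDLED;
}

static void my_rxdone_tasklet(unsigned long data)
{
	struct my_dev *d = (struct my_dev *)data;
	unsigned long flags;

	process_rx(d);

	/* Work done: re-enable our own interrupt source. */
	spin_lock_irqsave(&d->irqmask_lock, flags);
	reg_write(d, INT_MASK, reg_read(d, INT_MASK) & ~INT_RXDONE);
	spin_unlock_irqrestore(&d->irqmask_lock, flags);
}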
/*
@@ -2884,7 +2974,10 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
.irq_handler = rt61pci_interrupt,
- .irq_handler_thread = rt61pci_interrupt_thread,
+ .txstatus_tasklet = rt61pci_txstatus_tasklet,
+ .tbtt_tasklet = rt61pci_tbtt_tasklet,
+ .rxdone_tasklet = rt61pci_rxdone_tasklet,
+ .autowake_tasklet = rt61pci_autowake_tasklet,
.probe_hw = rt61pci_probe_hw,
.get_firmware_name = rt61pci_get_firmware_name,
.check_firmware = rt61pci_check_firmware,
@@ -2903,6 +2996,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
.stop_queue = rt61pci_stop_queue,
.write_tx_desc = rt61pci_write_tx_desc,
.write_beacon = rt61pci_write_beacon,
+ .clear_beacon = rt61pci_clear_beacon,
.fill_rxdone = rt61pci_fill_rxdone,
.config_shared_key = rt61pci_config_shared_key,
.config_pairwise_key = rt61pci_config_pairwise_key,
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 029be3c..5ff72de 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -502,26 +502,14 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
struct rt2x00intf_conf *conf,
const unsigned int flags)
{
- unsigned int beacon_base;
u32 reg;
if (flags & CONFIG_UPDATE_TYPE) {
/*
- * Clear current synchronisation setup.
- * For the Beacon base registers we only need to clear
- * the first byte since that byte contains the VALID and OWNER
- * bits which (when set to 0) will invalidate the entire beacon.
- */
- beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
- rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
-
- /*
* Enable synchronisation.
*/
rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
}
@@ -1440,9 +1428,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
rt73usb_disable_radio(rt2x00dev);
break;
case STATE_RADIO_IRQ_ON:
- case STATE_RADIO_IRQ_ON_ISR:
case STATE_RADIO_IRQ_OFF:
- case STATE_RADIO_IRQ_OFF_ISR:
/* No support, but no error either */
break;
case STATE_DEEP_SLEEP:
@@ -1590,8 +1576,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
*/
rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
- rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
- rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
@@ -1602,6 +1586,33 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
entry->skb = NULL;
}
+static void rt73usb_clear_beacon(struct queue_entry *entry)
+{
+ struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+ unsigned int beacon_base;
+ u32 reg;
+
+ /*
+ * Disable beaconing while we are reloading the beacon data,
+ * otherwise we might be sending out invalid data.
+ */
+ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+
+ /*
+ * Clear beacon.
+ */
+ beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+ rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
+
+ /*
+ * Enable beaconing again.
+ */
+ rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
+ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
+}
+
static int rt73usb_get_tx_data_len(struct queue_entry *entry)
{
int length;
@@ -1698,9 +1709,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
/*
- * FIXME: Legacy driver indicates that the frame does
- * contain the Michael Mic. Unfortunately, in rt2x00
- * the MIC seems to be missing completely...
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
*/
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
@@ -2313,6 +2323,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.flush_queue = rt2x00usb_flush_queue,
.write_tx_desc = rt73usb_write_tx_desc,
.write_beacon = rt73usb_write_beacon,
+ .clear_beacon = rt73usb_clear_beacon,
.get_tx_data_len = rt73usb_get_tx_data_len,
.fill_rxdone = rt73usb_fill_rxdone,
.config_shared_key = rt73usb_config_shared_key,
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index d6a924a..25d2d66 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -748,7 +748,8 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c
index 64a0214..ef8370e 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/wl1251/acx.c
@@ -776,6 +776,31 @@ out:
return ret;
}
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+ u8 depth, enum wl1251_acx_low_rssi_type type)
+{
+ struct acx_low_rssi *rssi;
+ int ret;
+
+ wl1251_debug(DEBUG_ACX, "acx low rssi");
+
+ rssi = kzalloc(sizeof(*rssi), GFP_KERNEL);
+ if (!rssi)
+ return -ENOMEM;
+
+ rssi->threshold = threshold;
+ rssi->weight = weight;
+ rssi->depth = depth;
+ rssi->type = type;
+
+ ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi));
+ if (ret < 0)
+ wl1251_warning("failed to set low rssi threshold: %d", ret);
+
+ kfree(rssi);
+ return ret;
+}
+
int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
{
struct acx_preamble *acx;
@@ -978,6 +1003,34 @@ out:
return ret;
}
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+ u8 max_consecutive)
+{
+ struct wl1251_acx_bet_enable *acx;
+ int ret;
+
+ wl1251_debug(DEBUG_ACX, "acx bet enable");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->enable = mode;
+ acx->max_consecutive = max_consecutive;
+
+ ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("wl1251 acx bet enable failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop)
{
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h
index efcc3aa..c2ba100 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -399,6 +399,49 @@ struct acx_rts_threshold {
u8 pad[2];
} __packed;
+enum wl1251_acx_low_rssi_type {
+ /*
+ * The event is a "Level" indication which keeps triggering
+ * as long as the average RSSI is below the threshold.
+ */
+ WL1251_ACX_LOW_RSSI_TYPE_LEVEL = 0,
+
+ /*
+ * The event is an "Edge" indication which triggers
+ * only when the RSSI threshold is crossed from above.
+ */
+ WL1251_ACX_LOW_RSSI_TYPE_EDGE = 1,
+};
+
+struct acx_low_rssi {
+ struct acx_header header;
+
+ /*
+ * The threshold (in dBm) below (or above after low rssi
+ * indication) which the firmware generates an interrupt to the
+ * host. This parameter is signed.
+ */
+ s8 threshold;
+
+ /*
+ * The weight of the current RSSI sample, before adding the new
+ * sample, that is used to calculate the average RSSI.
+ */
+ u8 weight;
+
+ /*
+ * The number of Beacons/Probe response frames that will be
+ * received before issuing the Low or Regained RSSI event.
+ */
+ u8 depth;
+
+ /*
+ * Configures how the Low RSSI Event is triggered. Refer to
+ * enum wl1251_acx_low_rssi_type for more.
+ */
+ u8 type;
+} __packed;
+
struct acx_beacon_filter_option {
struct acx_header header;
@@ -1164,6 +1207,31 @@ struct wl1251_acx_wr_tbtt_and_dtim {
u8 padding;
} __packed;
+enum wl1251_acx_bet_mode {
+ WL1251_ACX_BET_DISABLE = 0,
+ WL1251_ACX_BET_ENABLE = 1,
+};
+
+struct wl1251_acx_bet_enable {
+ struct acx_header header;
+
+ /*
+ * Specifies if beacon early termination procedure is enabled or
+ * disabled, see enum wl1251_acx_bet_mode.
+ */
+ u8 enable;
+
+ /*
+ * Specifies the maximum number of consecutive beacons that may be
+ * early terminated. After this number is reached at least one full
+ * beacon must be correctly received in FW before beacon ET
+ * resumes. Range 0 - 255.
+ */
+ u8 max_consecutive;
+
+ u8 padding[2];
+} __packed;
+
struct wl1251_acx_ac_cfg {
struct acx_header header;
@@ -1393,6 +1461,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl);
int wl1251_acx_bcn_dtim_options(struct wl1251 *wl);
int wl1251_acx_aid(struct wl1251 *wl, u16 aid);
int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask);
+int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
+ u8 depth, enum wl1251_acx_low_rssi_type type);
int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble);
int wl1251_acx_cts_protect(struct wl1251 *wl,
enum acx_ctsprotect_type ctsprotect);
@@ -1401,6 +1471,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
int wl1251_acx_rate_policies(struct wl1251 *wl);
int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
+int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
+ u8 max_consecutive);
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop);
int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/wl1251/event.c
index 712372e..dfc4579 100644
--- a/drivers/net/wireless/wl1251/event.c
+++ b/drivers/net/wireless/wl1251/event.c
@@ -90,6 +90,24 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
}
}
+ if (wl->vif && wl->rssi_thold) {
+ if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) {
+ wl1251_debug(DEBUG_EVENT,
+ "ROAMING_TRIGGER_LOW_RSSI_EVENT");
+ ieee80211_cqm_rssi_notify(wl->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ GFP_KERNEL);
+ }
+
+ if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
+ wl1251_debug(DEBUG_EVENT,
+ "ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
+ ieee80211_cqm_rssi_notify(wl->vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ GFP_KERNEL);
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 40372ba..5a1c138 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -502,6 +502,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
wl->psm = 0;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+ wl->rssi_thold = 0;
wl->channel = WL1251_DEFAULT_CHANNEL;
wl1251_debugfs_reset(wl);
@@ -959,6 +960,16 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (changed & BSS_CHANGED_CQM) {
+ ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
+ WL1251_DEFAULT_LOW_RSSI_WEIGHT,
+ WL1251_DEFAULT_LOW_RSSI_DEPTH,
+ WL1251_ACX_LOW_RSSI_TYPE_EDGE);
+ if (ret < 0)
+ goto out;
+ wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ }
+
if (changed & BSS_CHANGED_BSSID) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
@@ -1313,9 +1324,11 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_BEACON_FILTER |
- IEEE80211_HW_SUPPORTS_UAPSD;
+ IEEE80211_HW_SUPPORTS_UAPSD |
+ IEEE80211_HW_SUPPORTS_CQM_RSSI;
- wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
@@ -1377,6 +1390,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
wl->psm_requested = false;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
+ wl->rssi_thold = 0;
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
wl->vif = NULL;
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/wl1251/ps.c
index 5ed47c8..9ba23ed 100644
--- a/drivers/net/wireless/wl1251/ps.c
+++ b/drivers/net/wireless/wl1251/ps.c
@@ -153,6 +153,11 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
if (ret < 0)
return ret;
+ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
+ WL1251_DEFAULT_BET_CONSECUTIVE);
+ if (ret < 0)
+ return ret;
+
ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
@@ -170,6 +175,12 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
if (ret < 0)
return ret;
+ /* disable BET */
+ ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
+ WL1251_DEFAULT_BET_CONSECUTIVE);
+ if (ret < 0)
+ return ret;
+
/* disable beacon filtering */
ret = wl1251_acx_beacon_filter_opt(wl, false);
if (ret < 0)
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/wl1251/rx.c
index efa5360..b659e15 100644
--- a/drivers/net/wireless/wl1251/rx.c
+++ b/drivers/net/wireless/wl1251/rx.c
@@ -78,7 +78,8 @@ static void wl1251_rx_status(struct wl1251 *wl,
*/
wl->noise = desc->rssi - desc->snr / 2;
- status->freq = ieee80211_channel_to_frequency(desc->channel);
+ status->freq = ieee80211_channel_to_frequency(desc->channel,
+ status->band);
status->flag |= RX_FLAG_TSFT;
@@ -95,8 +96,52 @@ static void wl1251_rx_status(struct wl1251 *wl,
if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ switch (desc->rate) {
+ /* skip 1 and 12 Mbps because they share the same value 0x0a */
+ case RATE_2MBPS:
+ status->rate_idx = 1;
+ break;
+ case RATE_5_5MBPS:
+ status->rate_idx = 2;
+ break;
+ case RATE_11MBPS:
+ status->rate_idx = 3;
+ break;
+ case RATE_6MBPS:
+ status->rate_idx = 4;
+ break;
+ case RATE_9MBPS:
+ status->rate_idx = 5;
+ break;
+ case RATE_18MBPS:
+ status->rate_idx = 7;
+ break;
+ case RATE_24MBPS:
+ status->rate_idx = 8;
+ break;
+ case RATE_36MBPS:
+ status->rate_idx = 9;
+ break;
+ case RATE_48MBPS:
+ status->rate_idx = 10;
+ break;
+ case RATE_54MBPS:
+ status->rate_idx = 11;
+ break;
+ }
+
+ /* for 1 and 12 Mbps we have to check the modulation */
+ if (desc->rate == RATE_1MBPS) {
+ if (!(desc->mod_pre & OFDM_RATE_BIT))
+ /* CCK -> RATE_1MBPS */
+ status->rate_idx = 0;
+ else
+ /* OFDM -> RATE_12MBPS */
+ status->rate_idx = 6;
+ }
- /* FIXME: set status->rate_idx */
+ if (desc->mod_pre & SHORT_PREAMBLE_BIT)
+ status->flag |= RX_FLAG_SHORTPRE;
}
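The switch above leaves out hardware code 0x0a because the firmware uses it for both CCK 1 Mbps and OFDM 12 Mbps; the modulation bit in mod_pre settles the ambiguity afterwards. The same mapping can be read as a single helper, sketched here reusing the driver's rate constants:

/* Sketch: wl1251 hardware rate code -> mac80211 rate_idx. */
static int wl1251_rate_to_idx(u8 rate, u8 mod_pre)
{
	switch (rate) {
	case RATE_2MBPS:	return 1;
	case RATE_5_5MBPS:	return 2;
	case RATE_11MBPS:	return 3;
	case RATE_6MBPS:	return 4;
	case RATE_9MBPS:	return 5;
	case RATE_18MBPS:	return 7;
	case RATE_24MBPS:	return 8;
	case RATE_36MBPS:	return 9;
	case RATE_48MBPS:	return 10;
	case RATE_54MBPS:	return 11;
	case RATE_1MBPS:	/* == RATE_12MBPS, both encode as 0x0a */
		return (mod_pre & OFDM_RATE_BIT) ? 6 : 0;
	default:
		return 0;
	}
}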
static void wl1251_rx_body(struct wl1251 *wl,
diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/wl1251/tx.c
index 554b4f9..28121c5 100644
--- a/drivers/net/wireless/wl1251/tx.c
+++ b/drivers/net/wireless/wl1251/tx.c
@@ -213,16 +213,30 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
wl1251_debug(DEBUG_TX, "skb offset %d", offset);
/* check whether the current skb can be used */
- if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
- unsigned char *src = skb->data;
+ if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
+ struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
+ GFP_KERNEL);
+
+ if (unlikely(newskb == NULL)) {
+ wl1251_error("Can't allocate skb!");
+ return -EINVAL;
+ }
- /* align the buffer on a 4-byte boundary */
+ tx_hdr = (struct tx_double_buffer_desc *) newskb->data;
+
+ dev_kfree_skb_any(skb);
+ wl->tx_frames[tx_hdr->id] = skb = newskb;
+
+ offset = (4 - (long)skb->data) & 0x03;
+ wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
+ }
+
+ /* align the buffer on a 4-byte boundary */
+ if (offset) {
+ unsigned char *src = skb->data;
skb_reserve(skb, offset);
memmove(skb->data, src, skb->len);
tx_hdr = (struct tx_double_buffer_desc *) skb->data;
- } else {
- wl1251_info("No handler, fixme!");
- return -EINVAL;
}
}
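The rewritten path no longer bails out on cloned or tail-short skbs: it first swaps in a private copy with 3 bytes of spare tailroom via skb_copy_expand(), then performs the in-place 4-byte alignment unconditionally. The core of that alignment as a standalone sketch (it assumes the caller owns the skb and handles its own tx_frames bookkeeping):

#include <linux/skbuff.h>

/* Sketch: return an skb whose data pointer is 4-byte aligned, copying
 * first when the buffer is shared or lacks tailroom. */
static struct sk_buff *align_skb_4(struct sk_buff *skb)
{
	unsigned long offset = (4 - (unsigned long)skb->data) & 3;

	if (skb_cloned(skb) || skb_tailroom(skb) < offset) {
		struct sk_buff *new_skb = skb_copy_expand(skb, 0, 3,
							  GFP_KERNEL);

		if (!new_skb)
			return NULL;
		dev_kfree_skb_any(skb);
		skb = new_skb;
		offset = (4 - (unsigned long)skb->data) & 3;
	}

	if (offset) {
		unsigned char *src = skb->data;

		skb_reserve(skb, offset);	/* moves data/tail forward */
		memmove(skb->data, src, skb->len);
	}
	return skb;
}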
@@ -368,7 +382,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
{
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- int hdrlen, ret;
+ int hdrlen;
u8 *frame;
skb = wl->tx_frames[result->id];
@@ -407,40 +421,12 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl,
ieee80211_tx_status(wl->hw, skb);
wl->tx_frames[result->id] = NULL;
-
- if (wl->tx_queue_stopped) {
- wl1251_debug(DEBUG_TX, "cb: queue was stopped");
-
- skb = skb_dequeue(&wl->tx_queue);
-
- /* The skb can be NULL because tx_work might have been
- scheduled before the queue was stopped making the
- queue empty */
-
- if (skb) {
- ret = wl1251_tx_frame(wl, skb);
- if (ret == -EBUSY) {
- /* firmware buffer is still full */
- wl1251_debug(DEBUG_TX, "cb: fw buffer "
- "still full");
- skb_queue_head(&wl->tx_queue, skb);
- return;
- } else if (ret < 0) {
- dev_kfree_skb(skb);
- return;
- }
- }
-
- wl1251_debug(DEBUG_TX, "cb: waking queues");
- ieee80211_wake_queues(wl->hw);
- wl->tx_queue_stopped = false;
- }
}
/* Called upon reception of a TX complete interrupt */
void wl1251_tx_complete(struct wl1251 *wl)
{
- int i, result_index, num_complete = 0;
+ int i, result_index, num_complete = 0, queue_len;
struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
unsigned long flags;
@@ -471,18 +457,22 @@ void wl1251_tx_complete(struct wl1251 *wl)
}
}
- if (wl->tx_queue_stopped
- &&
- skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK){
+ queue_len = skb_queue_len(&wl->tx_queue);
- /* firmware buffer has space, restart queues */
+ if ((num_complete > 0) && (queue_len > 0)) {
+ /* firmware buffer has space, reschedule tx_work */
+ wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ }
+
+ if (wl->tx_queue_stopped &&
+ queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
+ /* tx_queue has space, restart queues */
wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_wake_queues(wl->hw);
wl->tx_queue_stopped = false;
spin_unlock_irqrestore(&wl->wl_lock, flags);
- ieee80211_queue_work(wl->hw, &wl->tx_work);
-
}
/* Every completed frame needs to be acknowledged */
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index c0ce2c8..bb23cd5 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -370,6 +370,8 @@ struct wl1251 {
/* in dBm */
int power_level;
+ int rssi_thold;
+
struct wl1251_stats stats;
struct wl1251_debugfs debugfs;
@@ -410,6 +412,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define WL1251_DEFAULT_CHANNEL 0
+#define WL1251_DEFAULT_BET_CONSECUTIVE 10
+
#define CHIP_ID_1251_PG10 (0x7010101)
#define CHIP_ID_1251_PG11 (0x7020101)
#define CHIP_ID_1251_PG12 (0x7030101)
@@ -431,4 +435,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define WL1251_PART_WORK_REG_START REGISTERS_BASE
#define WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE
+#define WL1251_DEFAULT_LOW_RSSI_WEIGHT 10
+#define WL1251_DEFAULT_LOW_RSSI_DEPTH 10
+
#endif
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index cc4068d..afdc601 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -751,10 +751,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
{
- struct acx_rate_policy *acx;
- struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+ struct acx_sta_rate_policy *acx;
+ struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
int idx = 0;
int ret = 0;
@@ -794,6 +794,38 @@ out:
return ret;
}
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+ u8 idx)
+{
+ struct acx_ap_rate_policy *acx;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx ap rate policy");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates);
+ acx->rate_policy.short_retry_limit = c->short_retry_limit;
+ acx->rate_policy.long_retry_limit = c->long_retry_limit;
+ acx->rate_policy.aflags = c->aflags;
+
+ acx->rate_policy_idx = cpu_to_le32(idx);
+
+ ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("Setting of ap rate policy failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifsn, u16 txop)
{
@@ -1233,6 +1265,7 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct wl1271_acx_ht_capabilities *acx;
u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
int ret = 0;
+ u32 ht_capabilites = 0;
wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
@@ -1244,16 +1277,16 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
/* Allow HT Operation ? */
if (allow_ht_operation) {
- acx->ht_capabilites =
+ ht_capabilites =
WL1271_ACX_FW_CAP_HT_OPERATION;
if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
- acx->ht_capabilites |=
+ ht_capabilites |=
WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
- acx->ht_capabilites |=
+ ht_capabilites |=
WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
- acx->ht_capabilites |=
+ ht_capabilites |=
WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
/* get data from A-MPDU parameters field */
@@ -1261,10 +1294,10 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
acx->ampdu_min_spacing = ht_cap->ampdu_density;
memcpy(acx->mac_address, mac_address, ETH_ALEN);
- } else { /* HT operations are not allowed */
- acx->ht_capabilites = 0;
}
+ acx->ht_capabilites = cpu_to_le32(ht_capabilites);
+
ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx ht capabilities setting failed: %d", ret);
@@ -1309,6 +1342,91 @@ out:
return ret;
}
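Besides dropping the redundant else branch, this rework fixes an endianness bug: the flags are now accumulated in a host-order u32 and stored into the little-endian firmware field with a single cpu_to_le32() at the end. The general shape of the fix, as a sketch with a hypothetical message struct:

#include <linux/types.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

struct fw_msg {
	__le32 flags;		/* wire format, little endian */
};

static void fill_flags(struct fw_msg *msg, bool a, bool b)
{
	u32 flags = 0;		/* build in host order */

	if (a)
		flags |= BIT(0);
	if (b)
		flags |= BIT(1);

	msg->flags = cpu_to_le32(flags);	/* single conversion point */
}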
+/* Configure BA session initiator/receiver parameters setting in the FW. */
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+ enum ieee80211_back_parties direction,
+ u8 tid_index, u8 policy)
+{
+ struct wl1271_acx_ba_session_policy *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx ba session setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* ANY role */
+ acx->role_id = 0xff;
+ acx->tid = tid_index;
+ acx->enable = policy;
+ acx->ba_direction = direction;
+
+ switch (direction) {
+ case WLAN_BACK_INITIATOR:
+ acx->win_size = wl->conf.ht.tx_ba_win_size;
+ acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
+ break;
+ case WLAN_BACK_RECIPIENT:
+ acx->win_size = RX_BA_WIN_SIZE;
+ acx->inactivity_timeout = 0;
+ break;
+ default:
+ wl1271_error("Incorrect acx command id=%x\n", direction);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_cmd_configure(wl,
+ ACX_BA_SESSION_POLICY_CFG,
+ acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ba session setting failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+/* setup BA session receiver setting in the FW. */
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+ bool enable)
+{
+ struct wl1271_acx_ba_receiver_setup *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx ba receiver session setting");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Single link for now */
+ acx->link_id = 1;
+ acx->tid = tid_index;
+ acx->enable = enable;
+ acx->win_size = 0;
+ acx->ssn = ssn;
+
+ ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
+ sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ba receiver session failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
{
struct wl1271_acx_fw_tsf_information *tsf_info;
@@ -1334,3 +1452,27 @@ out:
kfree(tsf_info);
return ret;
}
+
+int wl1271_acx_max_tx_retry(struct wl1271 *wl)
+{
+ struct wl1271_acx_max_tx_retry *acx = NULL;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx max tx retry");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx)
+ return -ENOMEM;
+
+ acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries);
+
+ ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx max tx retry failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 7bd8e4d..4bbaf04 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -747,13 +747,23 @@ struct acx_rate_class {
#define ACX_TX_BASIC_RATE 0
#define ACX_TX_AP_FULL_RATE 1
#define ACX_TX_RATE_POLICY_CNT 2
-struct acx_rate_policy {
+struct acx_sta_rate_policy {
struct acx_header header;
__le32 rate_class_cnt;
struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
} __packed;
+
+#define ACX_TX_AP_MODE_MGMT_RATE 4
+#define ACX_TX_AP_MODE_BCST_RATE 5
+struct acx_ap_rate_policy {
+ struct acx_header header;
+
+ __le32 rate_policy_idx;
+ struct acx_rate_class rate_policy;
+} __packed;
+
struct acx_ac_cfg {
struct acx_header header;
u8 ac;
@@ -1051,6 +1061,59 @@ struct wl1271_acx_ht_information {
u8 padding[3];
} __packed;
+#define RX_BA_WIN_SIZE 8
+
+struct wl1271_acx_ba_session_policy {
+ struct acx_header header;
+ /*
+ * Specifies role Id, Range 0-7, 0xFF means ANY role.
+ * Future use. For now this field is irrelevant
+ */
+ u8 role_id;
+ /*
+ * Specifies Link Id, Range 0-31, 0xFF means ANY Link Id.
+ * Not applicable if Role Id is set to ANY.
+ */
+ u8 link_id;
+
+ u8 tid;
+
+ u8 enable;
+
+ /* Window size in number of packets */
+ u16 win_size;
+
+ /*
+ * As initiator: inactivity timeout in time units (TU) of 1024us.
+ * As receiver: reserved.
+ */
+ u16 inactivity_timeout;
+
+ /* Initiator = 1/Receiver = 0 */
+ u8 ba_direction;
+
+ u8 padding[3];
+} __packed;
+
+struct wl1271_acx_ba_receiver_setup {
+ struct acx_header header;
+
+ /* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */
+ u8 link_id;
+
+ u8 tid;
+
+ u8 enable;
+
+ u8 padding[1];
+
+ /* Window size in number of packets */
+ u16 win_size;
+
+ /* BA session starting sequence number. RANGE 0-FFF */
+ u16 ssn;
+} __packed;
+
struct wl1271_acx_fw_tsf_information {
struct acx_header header;
@@ -1062,6 +1125,17 @@ struct wl1271_acx_fw_tsf_information {
u8 padding[3];
} __packed;
+struct wl1271_acx_max_tx_retry {
+ struct acx_header header;
+
+ /*
+ * The number of frame transmission failures before
+ * issuing the aging event.
+ */
+ __le16 max_tx_retry;
+ u8 padding_1[2];
+} __packed;
+
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
ACX_MEM_CFG = 0x0003,
@@ -1113,12 +1187,13 @@ enum {
ACX_RSSI_SNR_WEIGHTS = 0x0052,
ACX_KEEP_ALIVE_MODE = 0x0053,
ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
- ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
- ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
+ ACX_BA_SESSION_POLICY_CFG = 0x0055,
+ ACX_BA_SESSION_RX_SETUP = 0x0056,
ACX_PEER_HT_CAP = 0x0057,
ACX_HT_BSS_OPERATION = 0x0058,
ACX_COEX_ACTIVITY = 0x0059,
ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
+ ACX_MAX_TX_FAILURE = 0x0072,
DOT11_RX_MSDU_LIFE_TIME = 0x1004,
DOT11_CUR_TX_PWR = 0x100D,
DOT11_RX_DOT11_MODE = 0x1012,
@@ -1160,7 +1235,9 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
+ u8 idx);
int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifsn, u16 txop);
int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
@@ -1185,6 +1262,12 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
bool allow_ht_operation);
int wl1271_acx_set_ht_information(struct wl1271 *wl,
u16 ht_operation_mode);
+int wl1271_acx_set_ba_session(struct wl1271 *wl,
+ enum ieee80211_back_parties direction,
+ u8 tid_index, u8 policy);
+int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
+ bool enable);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
+int wl1271_acx_max_tx_retry(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 4df04f8..1ffbad6 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -28,6 +28,7 @@
#include "boot.h"
#include "io.h"
#include "event.h"
+#include "rx.h"
static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
[PART_DOWN] = {
@@ -100,6 +101,22 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
}
+static void wl1271_parse_fw_ver(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
+ &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
+ &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
+ &wl->chip.fw_ver[4]);
+
+ if (ret != 5) {
+ wl1271_warning("fw version incorrect value");
+ memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
+ return;
+ }
+}
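The new parser splits the firmware version string into five numeric components and zeroes the array on a partial match, so later version checks fail safe. A sketch of the same idea (the 4-character prefix skip mirrors the driver's "+ 4"; the exact prefix text of fw_ver_str is an assumption here):

#include <linux/kernel.h>
#include <linux/string.h>

/* Sketch: parse "Xxx a.b.c.d.e" into five components, zero on failure. */
static void parse_fw_ver(const char *str, unsigned int ver[5])
{
	if (sscanf(str + 4, "%u.%u.%u.%u.%u",
		   &ver[0], &ver[1], &ver[2], &ver[3], &ver[4]) != 5)
		memset(ver, 0, 5 * sizeof(ver[0]));
}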
+
static void wl1271_boot_fw_version(struct wl1271 *wl)
{
struct wl1271_static_data static_data;
@@ -107,11 +124,13 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
false);
- strncpy(wl->chip.fw_ver, static_data.fw_version,
- sizeof(wl->chip.fw_ver));
+ strncpy(wl->chip.fw_ver_str, static_data.fw_version,
+ sizeof(wl->chip.fw_ver_str));
/* make sure the string is NULL-terminated */
- wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0';
+ wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+
+ wl1271_parse_fw_ver(wl);
}
static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -231,7 +250,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
*/
if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
- if (wl->nvs->general_params.dual_mode_select)
+ /* for now 11a is unsupported in AP mode */
+ if (wl->bss_type != BSS_TYPE_AP_BSS &&
+ wl->nvs->general_params.dual_mode_select)
wl->enable_11a = true;
}
@@ -431,6 +452,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
PSPOLL_DELIVERY_FAILURE_EVENT_ID |
SOFT_GEMINI_SENSE_EVENT_ID;
+ if (wl->bss_type == BSS_TYPE_AP_BSS)
+ wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID;
+
ret = wl1271_event_unmask(wl);
if (ret < 0) {
wl1271_error("EVENT mask setting failed");
@@ -595,8 +619,7 @@ int wl1271_boot(struct wl1271 *wl)
wl1271_boot_enable_interrupts(wl);
/* set the wl1271 default filters */
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl1271_set_default_filters(wl);
wl1271_event_mbox_config(wl);
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 0106628a..1bb8be5 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -36,6 +36,7 @@
#include "wl12xx_80211.h"
#include "cmd.h"
#include "event.h"
+#include "tx.h"
#define WL1271_CMD_FAST_POLL_COUNT 50
@@ -221,7 +222,7 @@ int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
*/
-static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
{
u32 events_vector, event;
unsigned long timeout;
@@ -230,7 +231,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
do {
if (time_after(jiffies, timeout)) {
- ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
+ (int)mask);
return -ETIMEDOUT;
}
@@ -248,6 +250,19 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
return 0;
}
+static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
+{
+ int ret;
+
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
+ if (ret != 0) {
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ return ret;
+ }
+
+ return 0;
+}
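The split yields two entry points: wl1271_cmd_wait_for_event_or_timeout() reports -ETIMEDOUT and lets the caller decide, while the original name keeps its old contract of escalating a timeout to the recovery worker. The pattern, reduced to placeholder names:

#include <linux/types.h>

struct my_dev;

static int wait_for_event_or_timeout(struct my_dev *d, u32 mask);
static void schedule_recovery(struct my_dev *d);

/* Strict wrapper: a timeout here is treated as fatal. */
static int wait_for_event(struct my_dev *d, u32 mask)
{
	int ret = wait_for_event_or_timeout(d, mask);

	if (ret)
		schedule_recovery(d);
	return ret;
}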
+
int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
{
struct wl1271_cmd_join *join;
@@ -490,8 +505,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
cmd->len = cpu_to_le16(buf_len);
cmd->template_type = template_id;
cmd->enabled_rates = cpu_to_le32(rates);
- cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
- cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
+ cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit;
+ cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit;
cmd->index = index;
if (buf)
@@ -659,15 +674,15 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
/* llc layer */
memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
- tmpl.llc_type = htons(ETH_P_ARP);
+ tmpl.llc_type = cpu_to_be16(ETH_P_ARP);
/* arp header */
arp_hdr = &tmpl.arp_hdr;
- arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
- arp_hdr->ar_pro = htons(ETH_P_IP);
+ arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
+ arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
arp_hdr->ar_hln = ETH_ALEN;
arp_hdr->ar_pln = 4;
- arp_hdr->ar_op = htons(ARPOP_REPLY);
+ arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
/* arp payload */
memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
@@ -702,9 +717,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
wl->basic_rate);
}
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
{
- struct wl1271_cmd_set_keys *cmd;
+ struct wl1271_cmd_set_sta_keys *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -731,11 +746,42 @@ out:
return ret;
}
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
+{
+ struct wl1271_cmd_set_ap_keys *cmd;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->hlid = WL1271_AP_BROADCAST_HLID;
+ cmd->key_id = id;
+ cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
+ cmd->key_action = cpu_to_le16(KEY_SET_ID);
+ cmd->key_type = KEY_WEP;
+
+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+
+ return ret;
+}
+
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
- struct wl1271_cmd_set_keys *cmd;
+ struct wl1271_cmd_set_sta_keys *cmd;
int ret = 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -788,6 +834,67 @@ out:
return ret;
}
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
+{
+ struct wl1271_cmd_set_ap_keys *cmd;
+ int ret = 0;
+ u8 lid_type;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ if (hlid == WL1271_AP_BROADCAST_HLID) {
+ if (key_type == KEY_WEP)
+ lid_type = WEP_DEFAULT_LID_TYPE;
+ else
+ lid_type = BROADCAST_LID_TYPE;
+ } else {
+ lid_type = UNICAST_LID_TYPE;
+ }
+
+ wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
+ " hlid: %d", (int)action, (int)id, (int)lid_type,
+ (int)key_type, (int)hlid);
+
+ cmd->lid_key_type = lid_type;
+ cmd->hlid = hlid;
+ cmd->key_action = cpu_to_le16(action);
+ cmd->key_size = key_size;
+ cmd->key_type = key_type;
+ cmd->key_id = id;
+ cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
+ cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
+
+ if (key_type == KEY_TKIP) {
+ /*
+ * We get the key in the following form:
+ * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
+ * but the target is expecting:
+ * TKIP - RX MIC - TX MIC
+ */
+ memcpy(cmd->key, key, 16);
+ memcpy(cmd->key + 16, key + 24, 8);
+ memcpy(cmd->key + 24, key + 16, 8);
+ } else {
+ memcpy(cmd->key, key, key_size);
+ }
+
+ wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd));
+
+ ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_warning("could not set ap keys");
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
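The TKIP branch above exists because mac80211 hands over the key as TKIP[16] | TX MIC[8] | RX MIC[8], while the firmware wants the two MICs swapped. Isolated as a helper, the reorder looks like this (a sketch):

#include <linux/string.h>
#include <linux/types.h>

/* mac80211 layout: TKIP key (16) | TX MIC (8) | RX MIC (8)
 * firmware layout: TKIP key (16) | RX MIC (8) | TX MIC (8) */
static void tkip_key_reorder(u8 dst[32], const u8 src[32])
{
	memcpy(dst, src, 16);		/* temporal key, unchanged */
	memcpy(dst + 16, src + 24, 8);	/* RX MIC moves forward */
	memcpy(dst + 24, src + 16, 8);	/* TX MIC moves back */
}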
+
int wl1271_cmd_disconnect(struct wl1271 *wl)
{
struct wl1271_cmd_disconnect *cmd;
@@ -850,3 +957,180 @@ out_free:
out:
return ret;
}
+
+int wl1271_cmd_start_bss(struct wl1271 *wl)
+{
+ struct wl1271_cmd_bss_start *cmd;
+ struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd start bss");
+
+ /*
+ * FIXME: We currently do not support hidden SSID. The real SSID
+ * should be fetched from mac80211 first.
+ */
+ if (wl->ssid_len == 0) {
+ wl1271_warning("Hidden SSID currently not supported for AP");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN);
+
+ cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC);
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
+ cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
+ cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
+ cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->dtim_interval = bss_conf->dtim_period;
+ cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
+ cmd->channel = wl->channel;
+ cmd->ssid_len = wl->ssid_len;
+ cmd->ssid_type = SSID_TYPE_PUBLIC;
+ memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
+
+ switch (wl->band) {
+ case IEEE80211_BAND_2GHZ:
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ cmd->band = RADIO_BAND_5GHZ;
+ break;
+ default:
+ wl1271_warning("bss start - unknown band: %d", (int)wl->band);
+ cmd->band = RADIO_BAND_2_4GHZ;
+ break;
+ }
+
+ ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd start bss");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_stop_bss(struct wl1271 *wl)
+{
+ struct wl1271_cmd_bss_start *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd stop bss");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+
+ ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd stop bss");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+{
+ struct wl1271_cmd_add_sta *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* currently we don't support UAPSD */
+ cmd->sp_len = 0;
+
+ memcpy(cmd->addr, sta->addr, ETH_ALEN);
+ cmd->bss_index = WL1271_AP_BSS_INDEX;
+ cmd->aid = sta->aid;
+ cmd->hlid = hlid;
+
+ /*
+ * FIXME: Does STA support QOS? We need to propagate this info from
+ * hostapd. Currently not that important since this is only used for
+ * sending the correct flavor of null-data packet in response to a
+ * trigger.
+ */
+ cmd->wmm = 0;
+
+ cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
+ sta->supp_rates[wl->band]));
+
+ wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
+
+ ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd add sta");
+ goto out_free;
+ }
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
+
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
+{
+ struct wl1271_cmd_remove_sta *cmd;
+ int ret;
+
+ wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cmd->hlid = hlid;
+ /* We never send a deauth, mac80211 is in charge of this */
+ cmd->reason_opcode = 0;
+ cmd->send_deauth_flag = 0;
+
+ ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
+ if (ret < 0) {
+ wl1271_error("failed to initiate cmd remove sta");
+ goto out_free;
+ }
+
+ /*
+ * We are ok with a timeout here. The event is sometimes not sent
+ * due to a firmware bug.
+ */
+ wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
+
+out_free:
+ kfree(cmd);
+
+out:
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index 2a1d9db..7512814 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -54,12 +54,20 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
int wl1271_build_qos_null_data(struct wl1271 *wl);
int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
-int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
-int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
- u8 key_size, const u8 *key, const u8 *addr,
- u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, const u8 *addr,
+ u32 tx_seq_32, u16 tx_seq_16);
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16);
int wl1271_cmd_disconnect(struct wl1271 *wl);
int wl1271_cmd_set_sta_state(struct wl1271 *wl);
+int wl1271_cmd_start_bss(struct wl1271 *wl);
+int wl1271_cmd_stop_bss(struct wl1271 *wl);
+int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -98,6 +106,12 @@ enum wl1271_commands {
CMD_STOP_PERIODIC_SCAN = 51,
CMD_SET_STA_STATE = 52,
+ /* AP mode commands */
+ CMD_BSS_START = 60,
+ CMD_BSS_STOP = 61,
+ CMD_ADD_STA = 62,
+ CMD_REMOVE_STA = 63,
+
NUM_COMMANDS,
MAX_COMMAND_ID = 0xFFFF,
};
@@ -126,6 +140,13 @@ enum cmd_templ {
* For CTS-to-self (FastCTS) mechanism
* for BT/WLAN coexistence (SoftGemini). */
CMD_TEMPL_ARP_RSP,
+
+ /* AP-mode specific */
+ CMD_TEMPL_AP_BEACON = 13,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ CMD_TEMPL_AP_ARP_RSP,
+ CMD_TEMPL_DEAUTH_AP,
+
CMD_TEMPL_MAX = 0xff
};
@@ -270,7 +291,6 @@ struct wl1271_cmd_ps_params {
/* HW encryption keys */
#define NUM_ACCESS_CATEGORIES_COPY 4
-#define MAX_KEY_SIZE 32
enum wl1271_cmd_key_action {
KEY_ADD_OR_REPLACE = 1,
@@ -289,7 +309,7 @@ enum wl1271_cmd_key_type {
/* FIXME: Add description for key-types */
-struct wl1271_cmd_set_keys {
+struct wl1271_cmd_set_sta_keys {
struct wl1271_cmd_header header;
/* Ignored for default WEP key */
@@ -318,6 +338,57 @@ struct wl1271_cmd_set_keys {
__le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
} __packed;
+enum wl1271_cmd_lid_key_type {
+ UNICAST_LID_TYPE = 0,
+ BROADCAST_LID_TYPE = 1,
+ WEP_DEFAULT_LID_TYPE = 2
+};
+
+struct wl1271_cmd_set_ap_keys {
+ struct wl1271_cmd_header header;
+
+ /*
+ * Indicates whether the HLID is a unicast key set
+ * or broadcast key set. A special value 0xFF is
+ * used to indicate that the HLID is on WEP-default
+ * (multi-hlids). Of type wl1271_cmd_lid_key_type.
+ */
+ u8 hlid;
+
+ /*
+ * In WEP-default network (hlid == 0xFF) used to
+ * indicate which network STA/IBSS/AP role should be
+ * changed
+ */
+ u8 lid_key_type;
+
+ /*
+ * Key ID - For TKIP and AES key types, this field
+ * indicates the value that should be inserted into
+ * the KeyID field of frames transmitted using this
+ * key entry. For broadcast keys the index use as a
+ * marker for TX/RX key.
+ * For WEP default network (HLID=0xFF), this field
+ * indicates the ID of the key to add or remove.
+ */
+ u8 key_id;
+ u8 reserved_1;
+
+ /* key_action_e */
+ __le16 key_action;
+
+ /* key size in bytes */
+ u8 key_size;
+
+ /* key_type_e */
+ u8 key_type;
+
+ /* This field holds the security key data to add to the STA table */
+ u8 key[MAX_KEY_SIZE];
+ __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
+ __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
+} __packed;
+
struct wl1271_cmd_test_header {
u8 id;
u8 padding[3];
@@ -412,4 +483,68 @@ struct wl1271_cmd_set_sta_state {
u8 padding[3];
} __packed;
+enum wl1271_ssid_type {
+ SSID_TYPE_PUBLIC = 0,
+ SSID_TYPE_HIDDEN = 1
+};
+
+struct wl1271_cmd_bss_start {
+ struct wl1271_cmd_header header;
+
+ /* wl1271_ssid_type */
+ u8 ssid_type;
+ u8 ssid_len;
+ u8 ssid[IW_ESSID_MAX_SIZE];
+ u8 padding_1[2];
+
+ /* Basic rate set */
+ __le32 basic_rate_set;
+	/* Aging period in seconds */
+ __le16 aging_period;
+
+ /*
+ * This field specifies the time between target beacon
+ * transmission times (TBTTs), in time units (TUs).
+ * Valid values are 1 to 1024.
+ */
+ __le16 beacon_interval;
+ u8 bssid[ETH_ALEN];
+ u8 bss_index;
+ /* Radio band */
+ u8 band;
+ u8 channel;
+ /* The host link id for the AP's global queue */
+ u8 global_hlid;
+ /* The host link id for the AP's broadcast queue */
+ u8 broadcast_hlid;
+ /* DTIM count */
+ u8 dtim_interval;
+ /* Beacon expiry time in ms */
+ u8 beacon_expiry;
+ u8 padding_2[3];
+} __packed;
+
+struct wl1271_cmd_add_sta {
+ struct wl1271_cmd_header header;
+
+ u8 addr[ETH_ALEN];
+ u8 hlid;
+ u8 aid;
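+	/* Per-AC power save delivery type (assumption: U-APSD vs. legacy) */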
+ u8 psd_type[NUM_ACCESS_CATEGORIES_COPY];
+ __le32 supported_rates;
+ u8 bss_index;
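+	/* Max service period length and WMM support flag of the station
+	   (assumed semantics, derived from the field names) */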
+ u8 sp_len;
+ u8 wmm;
+ u8 padding1;
+} __packed;
+
+struct wl1271_cmd_remove_sta {
+ struct wl1271_cmd_header header;
+
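+	/*
+	 * HLID of the station to remove. The driver currently always sets
+	 * reason_opcode and send_deauth_flag to 0.
+	 */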
+ u8 hlid;
+ u8 reason_opcode;
+ u8 send_deauth_flag;
+ u8 padding1;
+} __packed;
+
#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index a16b361..fd1dac9 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -496,6 +496,26 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_2MBPS)
#define CONF_TX_RATE_RETRY_LIMIT 10
+/*
+ * Rates supported for data packets when operating as AP. Note the absence
+ * of the 22Mbps rate: the FW supports at most 12 rates, so one must be
+ * dropped, and the 22Mbps rate is not mandatory under any operating mode.
+ */
+#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+ CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \
+ CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \
+ CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS | \
+ CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
+ CONF_HW_BIT_RATE_54MBPS)
+
+/*
+ * Default rates for management traffic when operating in AP mode. This
+ * should be configured according to the basic rate set of the AP.
+ */
+#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS)
+
struct conf_tx_rate_class {
/*
@@ -636,9 +656,9 @@ struct conf_tx_settings {
/*
* Configuration for rate classes for TX (currently only one
- * rate class supported.)
+ * rate class supported). Used in non-AP mode.
*/
- struct conf_tx_rate_class rc_conf;
+ struct conf_tx_rate_class sta_rc_conf;
/*
* Configuration for access categories for TX rate control.
@@ -647,6 +667,28 @@ struct conf_tx_settings {
struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
/*
+ * Configuration for rate classes in AP-mode. These rate classes
+ * are for the AC TX queues
+ */
+ struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT];
+
+ /*
+ * Management TX rate class for AP-mode.
+ */
+ struct conf_tx_rate_class ap_mgmt_conf;
+
+ /*
+ * Broadcast TX rate class for AP-mode.
+ */
+ struct conf_tx_rate_class ap_bcst_conf;
+
+ /*
+ * AP-mode - allow this number of TX retries to a station before an
+ * event is triggered from FW.
+ */
+ u16 ap_max_tx_retries;
+
+ /*
* Configuration for TID parameters.
*/
u8 tid_conf_count;
@@ -687,6 +729,12 @@ struct conf_tx_settings {
* Range: CONF_HW_BIT_RATE_* bit mask
*/
u32 basic_rate_5;
+
+ /*
+ * TX retry limits for templates
+ */
+ u8 tmpl_short_retry_limit;
+ u8 tmpl_long_retry_limit;
};
enum {
@@ -1036,30 +1084,30 @@ struct conf_scan_settings {
/*
* The minimum time to wait on each channel for active scans
*
- * Range: 0 - 65536 tu
+ * Range: u32, in units of TU/1000
*/
- u16 min_dwell_time_active;
+ u32 min_dwell_time_active;
/*
* The maximum time to wait on each channel for active scans
*
- * Range: 0 - 65536 tu
+ * Range: u32, in units of TU/1000
*/
- u16 max_dwell_time_active;
+ u32 max_dwell_time_active;
/*
- * The maximum time to wait on each channel for passive scans
+ * The minimum time to wait on each channel for passive scans
*
- * Range: 0 - 65536 tu
+ * Range: u32, in units of TU/1000
*/
- u16 min_dwell_time_passive;
+ u32 min_dwell_time_passive;
/*
* The maximum time to wait on each channel for passive scans
*
- * Range: 0 - 65536 tu
+ * Range: u32, in units of TU/1000
*/
- u16 max_dwell_time_passive;
+ u32 max_dwell_time_passive;
/*
* Number of probe requests to transmit on each active scan channel
@@ -1090,6 +1138,11 @@ struct conf_rf_settings {
u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
};
+struct conf_ht_setting {
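+	/* TX block-ack window size (assumed: max frames per BA session) */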
+ u16 tx_ba_win_size;
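+	/* BA session inactivity timeout (assumed milliseconds) */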
+ u16 inactivity_timeout;
+};
+
struct conf_drv_settings {
struct conf_sg_settings sg;
struct conf_rx_settings rx;
@@ -1100,6 +1153,7 @@ struct conf_drv_settings {
struct conf_roam_trigger_settings roam_trigger;
struct conf_scan_settings scan;
struct conf_rf_settings rf;
+ struct conf_ht_setting ht;
};
#endif
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index ec60777..bebfa28 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -261,27 +261,25 @@ static ssize_t gpio_power_write(struct file *file,
unsigned long value;
int ret;
- mutex_lock(&wl->mutex);
-
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len)) {
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
buf[len] = '\0';
ret = strict_strtoul(buf, 0, &value);
if (ret < 0) {
wl1271_warning("illegal value in gpio_power");
- goto out;
+ return -EINVAL;
}
+ mutex_lock(&wl->mutex);
+
if (value)
wl1271_power_on(wl);
else
wl1271_power_off(wl);
-out:
mutex_unlock(&wl->mutex);
return count;
}
@@ -293,12 +291,13 @@ static const struct file_operations gpio_power_ops = {
.llseek = default_llseek,
};
-static int wl1271_debugfs_add_files(struct wl1271 *wl)
+static int wl1271_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
{
int ret = 0;
struct dentry *entry, *stats;
- stats = debugfs_create_dir("fw-statistics", wl->rootdir);
+ stats = debugfs_create_dir("fw-statistics", rootdir);
if (!stats || IS_ERR(stats)) {
entry = stats;
goto err;
@@ -395,16 +394,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
- DEBUGFS_ADD(tx_queue_len, wl->rootdir);
- DEBUGFS_ADD(retry_count, wl->rootdir);
- DEBUGFS_ADD(excessive_retries, wl->rootdir);
-
- DEBUGFS_ADD(gpio_power, wl->rootdir);
+ DEBUGFS_ADD(tx_queue_len, rootdir);
+ DEBUGFS_ADD(retry_count, rootdir);
+ DEBUGFS_ADD(excessive_retries, rootdir);
- entry = debugfs_create_x32("debug_level", 0600, wl->rootdir,
- &wl12xx_debug_level);
- if (!entry || IS_ERR(entry))
- goto err;
+ DEBUGFS_ADD(gpio_power, rootdir);
return 0;
@@ -419,7 +413,7 @@ err:
void wl1271_debugfs_reset(struct wl1271 *wl)
{
- if (!wl->rootdir)
+ if (!wl->stats.fw_stats)
return;
memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
@@ -430,13 +424,13 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
int wl1271_debugfs_init(struct wl1271 *wl)
{
int ret;
+ struct dentry *rootdir;
- wl->rootdir = debugfs_create_dir(KBUILD_MODNAME,
- wl->hw->wiphy->debugfsdir);
+ rootdir = debugfs_create_dir(KBUILD_MODNAME,
+ wl->hw->wiphy->debugfsdir);
- if (IS_ERR(wl->rootdir)) {
- ret = PTR_ERR(wl->rootdir);
- wl->rootdir = NULL;
+ if (IS_ERR(rootdir)) {
+ ret = PTR_ERR(rootdir);
goto err;
}
@@ -450,7 +444,7 @@ int wl1271_debugfs_init(struct wl1271 *wl)
wl->stats.fw_stats_update = jiffies;
- ret = wl1271_debugfs_add_files(wl);
+ ret = wl1271_debugfs_add_files(wl, rootdir);
if (ret < 0)
goto err_file;
@@ -462,8 +456,7 @@ err_file:
wl->stats.fw_stats = NULL;
err_fw:
- debugfs_remove_recursive(wl->rootdir);
- wl->rootdir = NULL;
+ debugfs_remove_recursive(rootdir);
err:
return ret;
@@ -473,8 +466,4 @@ void wl1271_debugfs_exit(struct wl1271 *wl)
{
kfree(wl->stats.fw_stats);
wl->stats.fw_stats = NULL;
-
- debugfs_remove_recursive(wl->rootdir);
- wl->rootdir = NULL;
-
}
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index f9146f5..3376a5d 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -186,6 +186,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
int ret;
u32 vector;
bool beacon_loss = false;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
wl1271_event_mbox_dump(mbox);
@@ -218,21 +219,21 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
*
*/
- if (vector & BSS_LOSE_EVENT_ID) {
+ if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
}
- if (vector & PS_REPORT_EVENT_ID) {
+ if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
if (ret < 0)
return ret;
}
- if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+ if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
wl1271_event_pspoll_delivery_fail(wl);
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 6cce014..1d5ef67 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -59,6 +59,7 @@ enum {
BSS_LOSE_EVENT_ID = BIT(18),
REGAINED_BSS_EVENT_ID = BIT(19),
ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20),
+ STA_REMOVE_COMPLETE_EVENT_ID = BIT(21), /* AP */
SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
@@ -115,7 +116,12 @@ struct event_mailbox {
u8 scheduled_scan_status;
u8 ps_status;
- u8 reserved_5[29];
+ /* AP FW only */
+ u8 hlid_removed;
+ __le16 sta_aging_status;
+ __le16 sta_tx_retry_exceeded;
+
+ u8 reserved_5[24];
} __packed;
int wl1271_event_unmask(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 785a530..70b3dc8 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -30,27 +30,9 @@
#include "acx.h"
#include "cmd.h"
#include "reg.h"
+#include "tx.h"
-static int wl1271_init_hwenc_config(struct wl1271 *wl)
-{
- int ret;
-
- ret = wl1271_acx_feature_cfg(wl);
- if (ret < 0) {
- wl1271_warning("couldn't set feature config");
- return ret;
- }
-
- ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key);
- if (ret < 0) {
- wl1271_warning("couldn't set default key");
- return ret;
- }
-
- return 0;
-}
-
-int wl1271_init_templates_config(struct wl1271 *wl)
+int wl1271_sta_init_templates_config(struct wl1271 *wl)
{
int ret, i;
@@ -118,6 +100,132 @@ int wl1271_init_templates_config(struct wl1271 *wl)
return 0;
}
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+{
+ struct wl12xx_disconn_template *tmpl;
+ int ret;
+
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ if (!tmpl) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_DEAUTH);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
+ tmpl, sizeof(*tmpl), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(tmpl);
+ return ret;
+}
+
+static int wl1271_ap_init_null_template(struct wl1271 *wl)
+{
+ struct ieee80211_hdr_3addr *nullfunc;
+ int ret;
+
+ nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
+ if (!nullfunc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+
+ /* nullfunc->addr1 is filled by FW */
+
+ memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
+ sizeof(*nullfunc), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(nullfunc);
+ return ret;
+}
+
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+{
+ struct ieee80211_qos_hdr *qosnull;
+ int ret;
+
+ qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
+ if (!qosnull) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+
+ /* qosnull->addr1 is filled by FW */
+
+ memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
+ sizeof(*qosnull), 0,
+ wl1271_tx_min_rate_get(wl));
+
+out:
+ kfree(qosnull);
+ return ret;
+}
+
+static int wl1271_ap_init_templates_config(struct wl1271 *wl)
+{
+ int ret;
+
+ /*
+	 * Set maximum-size empty placeholders for all templates. These
+	 * reserve FW template memory so the real contents can be set later.
+ */
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+ sizeof
+ (struct wl12xx_probe_resp_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+ sizeof
+ (struct wl12xx_beacon_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+ sizeof
+ (struct wl12xx_disconn_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
+ sizeof(struct wl12xx_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
+ sizeof
+ (struct wl12xx_qos_null_data_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
{
int ret;
@@ -145,10 +253,6 @@ int wl1271_init_phy_config(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_service_period_timeout(wl);
if (ret < 0)
return ret;
@@ -213,11 +317,186 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
return 0;
}
+static int wl1271_sta_hw_init(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl1271_cmd_ext_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_sta_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl, false);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon filtering */
+ ret = wl1271_init_beacon_filter(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Bluetooth WLAN coexistence */
+ ret = wl1271_init_pta(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Beacons and broadcast settings */
+ ret = wl1271_init_beacon_broadcast(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Configure for ELP power saving */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ if (ret < 0)
+ return ret;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_sta_rate_policies(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+{
+ int ret, i;
+
+ ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
+ if (ret < 0) {
+ wl1271_warning("couldn't set default key");
+ return ret;
+ }
+
+ /* disable all keep-alive templates */
+ for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
+ ret = wl1271_acx_keep_alive_config(wl, i,
+ ACX_KEEP_ALIVE_TPL_INVALID);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* disable the keep-alive feature */
+ ret = wl1271_acx_keep_alive_mode(wl, false);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_ap_hw_init(struct wl1271 *wl)
+{
+ int ret, i;
+
+ ret = wl1271_ap_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+
+ /* Configure initial TX rate classes */
+ for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_rc_conf[i], i);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_mgmt_conf,
+ ACX_TX_AP_MODE_MGMT_RATE);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_ap_rate_policy(wl,
+ &wl->conf.tx.ap_bcst_conf,
+ ACX_TX_AP_MODE_BCST_RATE);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_acx_max_tx_retry(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl1271_ap_init_deauth_template(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_ap_init_null_template(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_ap_init_qos_null_template(wl);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void wl1271_check_ba_support(struct wl1271 *wl)
+{
+	/* validate FW code version x.x.x.50-60.x */
+ if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
+ (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
+ wl->ba_support = true;
+ return;
+ }
+
+ wl->ba_support = false;
+}
+
+static int wl1271_set_ba_policies(struct wl1271 *wl)
+{
+ u8 tid_index;
+	/* must be signed: wl1271_acx_set_ba_session() returns negative errors */
+	int ret = 0;
+
+ /* Reset the BA RX indicators */
+ wl->ba_rx_bitmap = 0;
+
+	/* check that the FW supports BA */
+ wl1271_check_ba_support(wl);
+
+ if (wl->ba_support)
+ /* 802.11n initiator BA session setting */
+ for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
+ ++tid_index) {
+ ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
+ tid_index, true);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
int wl1271_hw_init(struct wl1271 *wl)
{
struct conf_tx_ac_category *conf_ac;
struct conf_tx_tid *conf_tid;
int ret, i;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
@@ -227,12 +506,12 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_cmd_ext_radio_parms(wl);
- if (ret < 0)
- return ret;
+ /* Mode specific init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init(wl);
+ else
+ ret = wl1271_sta_hw_init(wl);
- /* Template settings */
- ret = wl1271_init_templates_config(wl);
if (ret < 0)
return ret;
@@ -259,16 +538,6 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Beacon filtering */
- ret = wl1271_init_beacon_filter(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Configure TX patch complete interrupt behavior */
ret = wl1271_acx_tx_config_options(wl);
if (ret < 0)
@@ -279,21 +548,11 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Bluetooth WLAN coexistence */
- ret = wl1271_init_pta(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Energy detection */
ret = wl1271_init_energy_detection(wl);
if (ret < 0)
goto out_free_memmap;
- /* Beacons and boradcast settings */
- ret = wl1271_init_beacon_broadcast(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Default fragmentation threshold */
ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
if (ret < 0)
@@ -321,23 +580,13 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
}
- /* Configure TX rate classes */
- ret = wl1271_acx_rate_policies(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- goto out_free_memmap;
-
/* Configure HW encryption */
- ret = wl1271_init_hwenc_config(wl);
+ ret = wl1271_acx_feature_cfg(wl);
if (ret < 0)
goto out_free_memmap;
@@ -346,21 +595,17 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* disable all keep-alive templates */
- for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_acx_keep_alive_config(wl, i,
- ACX_KEEP_ALIVE_TPL_INVALID);
- if (ret < 0)
- goto out_free_memmap;
- }
+ /* Mode specific init - post mem init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init_post_mem(wl);
+ else
+ ret = wl1271_sta_hw_init_post_mem(wl);
- /* disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
if (ret < 0)
goto out_free_memmap;
- /* Configure rssi/snr averaging weights */
- ret = wl1271_acx_rssi_snr_avg_weights(wl);
+ /* Configure initiator BA sessions policies */
+ ret = wl1271_set_ba_policies(wl);
if (ret < 0)
goto out_free_memmap;
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 7762421..3a8bd3f 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,7 +27,7 @@
#include "wl12xx.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_init_templates_config(struct wl1271 *wl);
+int wl1271_sta_init_templates_config(struct wl1271 *wl);
int wl1271_init_phy_config(struct wl1271 *wl);
int wl1271_init_pta(struct wl1271 *wl);
int wl1271_init_energy_detection(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 062247e..254b7da 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -116,11 +116,11 @@ static struct conf_drv_settings default_conf = {
},
.tx = {
.tx_energy_detection = 0,
- .rc_conf = {
+ .sta_rc_conf = {
.enabled_rates = 0,
.short_retry_limit = 10,
.long_retry_limit = 10,
- .aflags = 0
+ .aflags = 0,
},
.ac_conf_count = 4,
.ac_conf = {
@@ -153,6 +153,45 @@ static struct conf_drv_settings default_conf = {
.tx_op_limit = 1504,
},
},
+ .ap_rc_conf = {
+ [0] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [1] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [2] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ [3] = {
+ .enabled_rates = CONF_TX_AP_ENABLED_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ },
+ .ap_mgmt_conf = {
+ .enabled_rates = CONF_TX_AP_DEFAULT_MGMT_RATES,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .ap_bcst_conf = {
+ .enabled_rates = CONF_HW_BIT_RATE_1MBPS,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .ap_max_tx_retries = 100,
.tid_conf_count = 4,
.tid_conf = {
[CONF_TX_AC_BE] = {
@@ -193,6 +232,8 @@ static struct conf_drv_settings default_conf = {
.tx_compl_threshold = 4,
.basic_rate = CONF_HW_BIT_RATE_1MBPS,
.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
+ .tmpl_short_retry_limit = 10,
+ .tmpl_long_retry_limit = 10,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -233,13 +274,13 @@ static struct conf_drv_settings default_conf = {
.avg_weight_rssi_beacon = 20,
.avg_weight_rssi_data = 10,
.avg_weight_snr_beacon = 20,
- .avg_weight_snr_data = 10
+ .avg_weight_snr_data = 10,
},
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
- .min_dwell_time_passive = 30000,
- .max_dwell_time_passive = 60000,
+ .min_dwell_time_passive = 100000,
+ .max_dwell_time_passive = 100000,
.num_probe_reqs = 2,
},
.rf = {
@@ -252,9 +293,14 @@ static struct conf_drv_settings default_conf = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
+ .ht = {
+ .tx_ba_win_size = 64,
+ .inactivity_timeout = 10000,
+ },
};
static void __wl1271_op_remove_interface(struct wl1271 *wl);
+static void wl1271_free_ap_keys(struct wl1271 *wl);
static void wl1271_device_release(struct device *dev)
@@ -393,7 +439,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_init_templates_config(wl);
+ ret = wl1271_sta_init_templates_config(wl);
if (ret < 0)
return ret;
@@ -616,9 +662,26 @@ out:
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
const struct firmware *fw;
+ const char *fw_name;
int ret;
- ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
+ switch (wl->bss_type) {
+ case BSS_TYPE_AP_BSS:
+ fw_name = WL1271_AP_FW_NAME;
+ break;
+ case BSS_TYPE_IBSS:
+ case BSS_TYPE_STA_BSS:
+ fw_name = WL1271_FW_NAME;
+ break;
+ default:
+ wl1271_error("no compatible firmware for bss_type %d",
+ wl->bss_type);
+ return -EINVAL;
+ }
+
+ wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
+
+ ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
if (ret < 0) {
wl1271_error("could not get firmware: %d", ret);
@@ -632,6 +695,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
goto out;
}
+ vfree(wl->fw);
wl->fw_len = fw->size;
wl->fw = vmalloc(wl->fw_len);
@@ -642,7 +706,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
}
memcpy(wl->fw, fw->data, wl->fw_len);
-
+ wl->fw_bss_type = wl->bss_type;
ret = 0;
out:
@@ -778,7 +842,8 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
goto out;
}
- if (wl->fw == NULL) {
+ /* Make sure the firmware type matches the BSS type */
+ if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
ret = wl1271_fetch_firmware(wl);
if (ret < 0)
goto out;
@@ -811,6 +876,8 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
+ wl->bss_type = BSS_TYPE_STA_BSS;
+
while (retries) {
retries--;
ret = wl1271_chip_wakeup(wl);
@@ -827,7 +894,7 @@ int wl1271_plt_start(struct wl1271 *wl)
wl->state = WL1271_STATE_PLT;
wl1271_notice("firmware booted in PLT mode (%s)",
- wl->chip.fw_ver);
+ wl->chip.fw_ver_str);
goto out;
irq_disable:
@@ -854,12 +921,10 @@ out:
return ret;
}
-int wl1271_plt_stop(struct wl1271 *wl)
+int __wl1271_plt_stop(struct wl1271 *wl)
{
int ret = 0;
- mutex_lock(&wl->mutex);
-
wl1271_notice("power down");
if (wl->state != WL1271_STATE_PLT) {
@@ -875,12 +940,21 @@ int wl1271_plt_stop(struct wl1271 *wl)
wl->state = WL1271_STATE_OFF;
wl->rx_counter = 0;
-out:
mutex_unlock(&wl->mutex);
-
cancel_work_sync(&wl->irq_work);
cancel_work_sync(&wl->recovery_work);
+ mutex_lock(&wl->mutex);
+out:
+ return ret;
+}
+
+int wl1271_plt_stop(struct wl1271 *wl)
+{
+ int ret;
+ mutex_lock(&wl->mutex);
+ ret = __wl1271_plt_stop(wl);
+ mutex_unlock(&wl->mutex);
return ret;
}
@@ -902,7 +976,8 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
spin_lock_irqsave(&wl->wl_lock, flags);
if (sta &&
(sta->supp_rates[conf->channel->band] !=
- (wl->sta_rate_set & HW_BG_RATES_MASK))) {
+ (wl->sta_rate_set & HW_BG_RATES_MASK)) &&
+ wl->bss_type != BSS_TYPE_AP_BSS) {
wl->sta_rate_set = sta->supp_rates[conf->channel->band];
set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
}
@@ -967,6 +1042,9 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
*
* The MAC address is first known when the corresponding interface
* is added. That is where we will initialize the hardware.
+ *
+ * In addition, we currently have different firmwares for AP and managed
+ * operation. We will know which to boot according to interface type.
*/
return 0;
@@ -1006,6 +1084,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
wl->bss_type = BSS_TYPE_IBSS;
wl->set_bss_type = BSS_TYPE_STA_BSS;
break;
+ case NL80211_IFTYPE_AP:
+ wl->bss_type = BSS_TYPE_AP_BSS;
+ break;
default:
ret = -EOPNOTSUPP;
goto out;
@@ -1061,11 +1142,11 @@ power_off:
wl->vif = vif;
wl->state = WL1271_STATE_ON;
- wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
wiphy->hw_version = wl->chip.id;
- strncpy(wiphy->fw_version, wl->chip.fw_ver,
+ strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
sizeof(wiphy->fw_version));
/*
@@ -1151,6 +1232,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
wl->flags = 0;
wl->vif = NULL;
wl->filters = 0;
+ wl1271_free_ap_keys(wl);
+ memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
@@ -1186,8 +1269,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
{
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl1271_set_default_filters(wl);
/* combine requested filters with current filter config */
filters = wl->filters | filters;
@@ -1322,25 +1404,7 @@ static void wl1271_set_band_rate(struct wl1271 *wl)
wl->basic_rate_set = wl->conf.tx.basic_rate_5;
}
-static u32 wl1271_min_rate_get(struct wl1271 *wl)
-{
- int i;
- u32 rate = 0;
-
- if (!wl->basic_rate_set) {
- WARN_ON(1);
- wl->basic_rate_set = wl->conf.tx.basic_rate;
- }
-
- for (i = 0; !rate; i++) {
- if ((wl->basic_rate_set >> i) & 0x1)
- rate = 1 << i;
- }
-
- return rate;
-}
-
-static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
{
int ret;
@@ -1350,9 +1414,9 @@ static int wl1271_handle_idle(struct wl1271 *wl, bool idle)
if (ret < 0)
goto out;
}
- wl->rate_set = wl1271_min_rate_get(wl);
+ wl->rate_set = wl1271_tx_min_rate_get(wl);
wl->sta_rate_set = 0;
- ret = wl1271_acx_rate_policies(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
goto out;
ret = wl1271_acx_keep_alive_config(
@@ -1381,14 +1445,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
struct wl1271 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int channel, ret = 0;
+ bool is_ap;
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ " changed 0x%x",
channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level,
- conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+ changed);
/*
* mac80211 will go to idle nearly immediately after transmitting some
@@ -1406,6 +1473,8 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
goto out;
}
+ is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
@@ -1417,31 +1486,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
wl->band = conf->channel->band;
wl->channel = channel;
- /*
- * FIXME: the mac80211 should really provide a fixed rate
- * to use here. for now, just use the smallest possible rate
- * for the band as a fixed rate for association frames and
- * other control messages.
- */
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_set_band_rate(wl);
-
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
- if (ret < 0)
- wl1271_warning("rate policy for update channel "
- "failed %d", ret);
+ if (!is_ap) {
+ /*
+ * FIXME: the mac80211 should really provide a fixed
+ * rate to use here. for now, just use the smallest
+ * possible rate for the band as a fixed rate for
+ * association frames and other control messages.
+ */
+ if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ wl1271_set_band_rate(wl);
- if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
- ret = wl1271_join(wl, false);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- wl1271_warning("cmd join to update channel "
+ wl1271_warning("rate policy for channel "
"failed %d", ret);
+
+ if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ ret = wl1271_join(wl, false);
+ if (ret < 0)
+ wl1271_warning("cmd join on channel "
+ "failed %d", ret);
+ }
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE);
+ if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
+ ret = wl1271_sta_handle_idle(wl,
+ conf->flags & IEEE80211_CONF_IDLE);
if (ret < 0)
wl1271_warning("idle mode change failed %d", ret);
}
@@ -1548,7 +1620,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
struct wl1271 *wl = hw->priv;
int ret;
- wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
+ wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
+ " total %x", changed, *total);
mutex_lock(&wl->mutex);
@@ -1562,15 +1635,16 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
-
- if (*total & FIF_ALLMULTI)
- ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
- else if (fp)
- ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
- fp->mc_list,
- fp->mc_list_length);
- if (ret < 0)
- goto out_sleep;
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI)
+ ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
+ else if (fp)
+ ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out_sleep;
+ }
/* determine, whether supported filter values have changed */
if (changed == 0)
@@ -1593,38 +1667,192 @@ out:
kfree(fp);
}
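+/*
+ * AP-mode keys set by mac80211 before the BSS is started cannot be
+ * pushed to the FW yet. Record them here and replay them from
+ * wl1271_ap_init_hwenc() once CMD_BSS_START has completed.
+ */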
+static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
+{
+ struct wl1271_ap_key *ap_key;
+ int i;
+
+ wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
+
+ if (key_size > MAX_KEY_SIZE)
+ return -EINVAL;
+
+ /*
+	 * Find the next free entry in recorded_ap_keys, and check that we
+	 * are not replacing an existing key.
+ */
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ if (wl->recorded_ap_keys[i] == NULL)
+ break;
+
+ if (wl->recorded_ap_keys[i]->id == id) {
+ wl1271_warning("trying to record key replacement");
+ return -EINVAL;
+ }
+ }
+
+ if (i == MAX_NUM_KEYS)
+ return -EBUSY;
+
+ ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
+ if (!ap_key)
+ return -ENOMEM;
+
+ ap_key->id = id;
+ ap_key->key_type = key_type;
+ ap_key->key_size = key_size;
+ memcpy(ap_key->key, key, key_size);
+ ap_key->hlid = hlid;
+ ap_key->tx_seq_32 = tx_seq_32;
+ ap_key->tx_seq_16 = tx_seq_16;
+
+ wl->recorded_ap_keys[i] = ap_key;
+ return 0;
+}
+
+static void wl1271_free_ap_keys(struct wl1271 *wl)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ kfree(wl->recorded_ap_keys[i]);
+ wl->recorded_ap_keys[i] = NULL;
+ }
+}
+
+static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+{
+ int i, ret = 0;
+ struct wl1271_ap_key *key;
+ bool wep_key_added = false;
+
+ for (i = 0; i < MAX_NUM_KEYS; i++) {
+ if (wl->recorded_ap_keys[i] == NULL)
+ break;
+
+ key = wl->recorded_ap_keys[i];
+ ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+ key->id, key->key_type,
+ key->key_size, key->key,
+ key->hlid, key->tx_seq_32,
+ key->tx_seq_16);
+ if (ret < 0)
+ goto out;
+
+ if (key->key_type == KEY_WEP)
+ wep_key_added = true;
+ }
+
+ if (wep_key_added) {
+ ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ wl1271_free_ap_keys(wl);
+ return ret;
+}
+
+static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u32 tx_seq_32,
+ u16 tx_seq_16, struct ieee80211_sta *sta)
+{
+ int ret;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+ if (is_ap) {
+ struct wl1271_station *wl_sta;
+ u8 hlid;
+
+ if (sta) {
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+ } else {
+ hlid = WL1271_AP_BROADCAST_HLID;
+ }
+
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ /*
+ * We do not support removing keys after AP shutdown.
+ * Pretend we do to make mac80211 happy.
+ */
+ if (action != KEY_ADD_OR_REPLACE)
+ return 0;
+
+ ret = wl1271_record_ap_key(wl, id,
+ key_type, key_size,
+ key, hlid, tx_seq_32,
+ tx_seq_16);
+ } else {
+ ret = wl1271_cmd_set_ap_key(wl, action,
+ id, key_type, key_size,
+ key, hlid, tx_seq_32,
+ tx_seq_16);
+ }
+
+ if (ret < 0)
+ return ret;
+ } else {
+ const u8 *addr;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+
+ addr = sta ? sta->addr : bcast_addr;
+
+ if (is_zero_ether_addr(addr)) {
+			/* We don't support TX-only encryption */
+ return -EOPNOTSUPP;
+ }
+
+		/* The wl1271 does not allow removing unicast keys; they are
+		   cleared automatically on the next CMD_JOIN. Ignore the
+		   request silently, as we don't want mac80211 to emit
+		   an error message. */
+ if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
+ return 0;
+
+ ret = wl1271_cmd_set_sta_key(wl, action,
+ id, key_type, key_size,
+ key, addr, tx_seq_32,
+ tx_seq_16);
+ if (ret < 0)
+ return ret;
+
+ /* the default WEP key needs to be configured at least once */
+ if (key_type == KEY_WEP) {
+ ret = wl1271_cmd_set_sta_default_wep_key(wl,
+ wl->default_key);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
- const u8 *addr;
int ret;
u32 tx_seq_32 = 0;
u16 tx_seq_16 = 0;
u8 key_type;
- static const u8 bcast_addr[ETH_ALEN] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
- addr = sta ? sta->addr : bcast_addr;
-
- wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
- wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
+ wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
key_conf->cipher, key_conf->keyidx,
key_conf->keylen, key_conf->flags);
wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
- if (is_zero_ether_addr(addr)) {
- /* We dont support TX only encryption */
- ret = -EOPNOTSUPP;
- goto out;
- }
-
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
@@ -1671,36 +1899,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
- key_conf->keyidx, key_type,
- key_conf->keylen, key_conf->key,
- addr, tx_seq_32, tx_seq_16);
+ ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+ key_conf->keyidx, key_type,
+ key_conf->keylen, key_conf->key,
+ tx_seq_32, tx_seq_16, sta);
if (ret < 0) {
wl1271_error("Could not add or replace key");
goto out_sleep;
}
-
- /* the default WEP key needs to be configured at least once */
- if (key_type == KEY_WEP) {
- ret = wl1271_cmd_set_default_wep_key(wl,
- wl->default_key);
- if (ret < 0)
- goto out_sleep;
- }
break;
case DISABLE_KEY:
- /* The wl1271 does not allow to remove unicast keys - they
- will be cleared automatically on next CMD_JOIN. Ignore the
- request silently, as we dont want the mac80211 to emit
- an error message. */
- if (!is_broadcast_ether_addr(addr))
- break;
-
- ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
- key_conf->keyidx, key_type,
- key_conf->keylen, key_conf->key,
- addr, 0, 0);
+ ret = wl1271_set_key(wl, KEY_REMOVE,
+ key_conf->keyidx, key_type,
+ key_conf->keylen, key_conf->key,
+ 0, 0, sta);
if (ret < 0) {
wl1271_error("Could not remove key");
goto out_sleep;
@@ -1719,7 +1932,6 @@ out_sleep:
out_unlock:
mutex_unlock(&wl->mutex);
-out:
return ret;
}
@@ -1821,7 +2033,7 @@ out:
return ret;
}
-static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
int offset)
{
u8 *ptr = skb->data + offset;
@@ -1831,89 +2043,210 @@ static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
if (ptr[0] == WLAN_EID_SSID) {
wl->ssid_len = ptr[1];
memcpy(wl->ssid, ptr+2, wl->ssid_len);
- return;
+ return 0;
}
ptr += (ptr[1] + 2);
}
+
wl1271_error("No SSID in IEs!\n");
+ return -ENOENT;
}
-static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
+static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- enum wl1271_cmd_ps_mode mode;
- struct wl1271 *wl = hw->priv;
- struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
- bool do_join = false;
- bool set_assoc = false;
- int ret;
+ int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+ else
+ ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+ if (ret < 0) {
+ wl1271_warning("Set slot time failed %d", ret);
+ goto out;
+ }
+ }
- mutex_lock(&wl->mutex);
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (bss_conf->use_short_preamble)
+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+ else
+ wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+ }
- if (unlikely(wl->state == WL1271_STATE_OFF))
- goto out;
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ if (bss_conf->use_cts_prot)
+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+ else
+ ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+ if (ret < 0) {
+ wl1271_warning("Set ctsprotect failed %d", ret);
+ goto out;
+ }
+ }
- ret = wl1271_ps_elp_wakeup(wl, false);
- if (ret < 0)
- goto out;
+out:
+ return ret;
+}
- if ((changed & BSS_CHANGED_BEACON_INT) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
- wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
+static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ int ret = 0;
+
+ if ((changed & BSS_CHANGED_BEACON_INT)) {
+ wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
wl->beacon_int = bss_conf->beacon_int;
- do_join = true;
}
- if ((changed & BSS_CHANGED_BEACON) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
- struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ if ((changed & BSS_CHANGED_BEACON)) {
+ struct ieee80211_hdr *hdr;
+ int ieoffset = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+ u16 tmpl_id;
- wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
+ if (!beacon)
+ goto out;
- if (beacon) {
- struct ieee80211_hdr *hdr;
- int ieoffset = offsetof(struct ieee80211_mgmt,
- u.beacon.variable);
+ wl1271_debug(DEBUG_MASTER, "beacon updated");
- wl1271_ssid_set(wl, beacon, ieoffset);
+ ret = wl1271_ssid_set(wl, beacon, ieoffset);
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
+ tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
+ CMD_TEMPL_BEACON;
+ ret = wl1271_cmd_template_set(wl, tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ wl1271_tx_min_rate_get(wl));
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
- beacon->data,
- beacon->len, 0,
- wl1271_min_rate_get(wl));
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+
+ tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE :
+ CMD_TEMPL_PROBE_RESPONSE;
+ ret = wl1271_cmd_template_set(wl,
+ tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ wl1271_tx_min_rate_get(wl));
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out;
+ }
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out_sleep;
- }
+out:
+ return ret;
+}
- hdr = (struct ieee80211_hdr *) beacon->data;
- hdr->frame_control = cpu_to_le16(
- IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_PROBE_RESP);
+/* AP mode changes */
+static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ int ret = 0;
- ret = wl1271_cmd_template_set(wl,
- CMD_TEMPL_PROBE_RESPONSE,
- beacon->data,
- beacon->len, 0,
- wl1271_min_rate_get(wl));
- dev_kfree_skb(beacon);
- if (ret < 0)
- goto out_sleep;
+ if ((changed & BSS_CHANGED_BASIC_RATES)) {
+ u32 rates = bss_conf->basic_rates;
+ struct conf_tx_rate_class mgmt_rc;
+
+ wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ wl1271_debug(DEBUG_AP, "basic rates: 0x%x",
+ wl->basic_rate_set);
+
+ /* update the AP management rate policy with the new rates */
+ mgmt_rc.enabled_rates = wl->basic_rate_set;
+ mgmt_rc.long_retry_limit = 10;
+ mgmt_rc.short_retry_limit = 10;
+ mgmt_rc.aflags = 0;
+ ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc,
+ ACX_TX_AP_MODE_MGMT_RATE);
+ if (ret < 0) {
+ wl1271_error("AP mgmt policy change failed %d", ret);
+ goto out;
+ }
+ }
- /* Need to update the SSID (for filtering etc) */
- do_join = true;
+ ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
+ if (ret < 0)
+ goto out;
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
+ if (bss_conf->enable_beacon) {
+ if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ ret = wl1271_cmd_start_bss(wl);
+ if (ret < 0)
+ goto out;
+
+ set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ wl1271_debug(DEBUG_AP, "started AP");
+
+ ret = wl1271_ap_init_hwenc(wl);
+ if (ret < 0)
+ goto out;
+ }
+ } else {
+ if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ ret = wl1271_cmd_stop_bss(wl);
+ if (ret < 0)
+ goto out;
+
+ clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ wl1271_debug(DEBUG_AP, "stopped AP");
+ }
}
}
- if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
- (wl->bss_type == BSS_TYPE_IBSS)) {
+ ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ if (ret < 0)
+ goto out;
+out:
+ return;
+}
+
+/* STA/IBSS mode changes */
+static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
+{
+ bool do_join = false, set_assoc = false;
+ bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ int ret;
+ struct ieee80211_sta *sta;
+
+ if (is_ibss) {
+ ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
+ changed);
+ if (ret < 0)
+ goto out;
+ }
+
+ if ((changed & BSS_CHANGED_BEACON_INT) && is_ibss)
+ do_join = true;
+
+ /* Need to update the SSID (for filtering etc) */
+ if ((changed & BSS_CHANGED_BEACON) && is_ibss)
+ do_join = true;
+
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) {
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
@@ -1924,7 +2257,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
do_join = true;
}
- if (changed & BSS_CHANGED_CQM) {
+ if ((changed & BSS_CHANGED_CQM)) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
@@ -1942,24 +2275,26 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
* and enable the BSSID filter
*/
memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ if (!is_zero_ether_addr(wl->bssid)) {
ret = wl1271_cmd_build_null_data(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
ret = wl1271_build_qos_null_data(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* filter out all packets not from this BSSID */
wl1271_configure_filters(wl, 0);
/* Need to update the BSSID (for filtering etc) */
do_join = true;
+ }
}
- if (changed & BSS_CHANGED_ASSOC) {
+ if ((changed & BSS_CHANGED_ASSOC)) {
if (bss_conf->assoc) {
u32 rates;
int ieoffset;
@@ -1975,10 +2310,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
rates = bss_conf->basic_rates;
wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
rates);
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/*
* with wl1271, we don't need to update the
@@ -1988,7 +2323,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
*/
ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
if (ret < 0)
- goto out_sleep;
+ goto out;
/*
* Get a template for hardware connection maintenance
@@ -2002,17 +2337,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* enable the connection monitoring feature */
ret = wl1271_acx_conn_monit_params(wl, true);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* If we want to go in PSM but we're not there yet */
if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ enum wl1271_cmd_ps_mode mode;
+
mode = STATION_POWER_SAVE_MODE;
ret = wl1271_ps_set_mode(wl, mode,
wl->basic_rate,
true);
if (ret < 0)
- goto out_sleep;
+ goto out;
}
} else {
/* use defaults when not associated */
@@ -2029,10 +2366,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* revert back to minimum rates for the current band */
wl1271_set_band_rate(wl);
- wl->basic_rate = wl1271_min_rate_get(wl);
- ret = wl1271_acx_rate_policies(wl);
+ wl->basic_rate = wl1271_tx_min_rate_get(wl);
+ ret = wl1271_acx_sta_rate_policies(wl);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* disable connection monitor features */
ret = wl1271_acx_conn_monit_params(wl, false);
@@ -2040,74 +2377,54 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
/* Disable the keep-alive feature */
ret = wl1271_acx_keep_alive_mode(wl, false);
if (ret < 0)
- goto out_sleep;
+ goto out;
/* restore the bssid filter and go to dummy bssid */
wl1271_unjoin(wl);
wl1271_dummy_join(wl);
}
-
- }
-
- if (changed & BSS_CHANGED_ERP_SLOT) {
- if (bss_conf->use_short_slot)
- ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
- else
- ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
- if (ret < 0) {
- wl1271_warning("Set slot time failed %d", ret);
- goto out_sleep;
- }
- }
-
- if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- if (bss_conf->use_short_preamble)
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
- else
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
}
- if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- if (bss_conf->use_cts_prot)
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
- else
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
- if (ret < 0) {
- wl1271_warning("Set ctsprotect failed %d", ret);
- goto out_sleep;
- }
- }
+ ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ if (ret < 0)
+ goto out;
- /*
- * Takes care of: New association with HT enable,
- * HT information change in beacon.
- */
- if (sta &&
- (changed & BSS_CHANGED_HT) &&
- (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
- if (ret < 0) {
- wl1271_warning("Set ht cap true failed %d", ret);
- goto out_sleep;
- }
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ /* handle new association with HT and HT information change */
+ if ((changed & BSS_CHANGED_HT) &&
+ (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap,
+ true);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap true failed %d",
+ ret);
+ rcu_read_unlock();
+ goto out;
+ }
ret = wl1271_acx_set_ht_information(wl,
- bss_conf->ht_operation_mode);
- if (ret < 0) {
- wl1271_warning("Set ht information failed %d", ret);
- goto out_sleep;
+ bss_conf->ht_operation_mode);
+ if (ret < 0) {
+ wl1271_warning("Set ht information failed %d",
+ ret);
+ rcu_read_unlock();
+ goto out;
+ }
}
- }
- /*
- * Takes care of: New association without HT,
- * Disassociation.
- */
- else if (sta && (changed & BSS_CHANGED_ASSOC)) {
- ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
- if (ret < 0) {
- wl1271_warning("Set ht cap false failed %d", ret);
- goto out_sleep;
+ /* handle new association without HT and disassociation */
+ else if (changed & BSS_CHANGED_ASSOC) {
+ ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap,
+ false);
+ if (ret < 0) {
+ wl1271_warning("Set ht cap false failed %d",
+ ret);
+ rcu_read_unlock();
+ goto out;
+ }
}
}
+ rcu_read_unlock();
if (changed & BSS_CHANGED_ARP_FILTER) {
__be32 addr = bss_conf->arp_addr_list[0];
@@ -2124,76 +2441,128 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1271_cmd_build_arp_rsp(wl, addr);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
- goto out_sleep;
+ goto out;
}
ret = wl1271_acx_arp_ip_filter(wl,
- (ACX_ARP_FILTER_ARP_FILTERING |
- ACX_ARP_FILTER_AUTO_ARP),
+ ACX_ARP_FILTER_ARP_FILTERING,
addr);
} else
ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
if (ret < 0)
- goto out_sleep;
+ goto out;
}
if (do_join) {
ret = wl1271_join(wl, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
- goto out_sleep;
+ goto out;
}
}
-out_sleep:
- wl1271_ps_elp_sleep(wl);
-
out:
- mutex_unlock(&wl->mutex);
+ return;
}
-static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *params)
+static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changed)
{
struct wl1271 *wl = hw->priv;
- u8 ps_scheme;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
int ret;
- mutex_lock(&wl->mutex);
+ wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
+ (int)changed);
- wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
+ mutex_lock(&wl->mutex);
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
- ret = -EAGAIN;
+ if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- }
ret = wl1271_ps_elp_wakeup(wl, false);
if (ret < 0)
goto out;
- /* the txop is confed in units of 32us by the mac80211, we need us */
- ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
- params->cw_min, params->cw_max,
- params->aifs, params->txop << 5);
- if (ret < 0)
- goto out_sleep;
+ if (is_ap)
+ wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
+ else
+ wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+}
+
+static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct wl1271 *wl = hw->priv;
+ u8 ps_scheme;
+ int ret = 0;
+
+ mutex_lock(&wl->mutex);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
if (params->uapsd)
ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
else
ps_scheme = CONF_PS_SCHEME_LEGACY;
- ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
- CONF_CHANNEL_TYPE_EDCF,
- wl1271_tx_get_queue(queue),
- ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
- if (ret < 0)
- goto out_sleep;
+ if (wl->state == WL1271_STATE_OFF) {
+ /*
+ * If the state is off, the parameters will be recorded and
+ * configured on init. This happens in AP-mode.
+ */
+ struct conf_tx_ac_category *conf_ac =
+ &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
+ struct conf_tx_tid *conf_tid =
+ &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
+
+ conf_ac->ac = wl1271_tx_get_queue(queue);
+ conf_ac->cw_min = (u8)params->cw_min;
+ conf_ac->cw_max = params->cw_max;
+ conf_ac->aifsn = params->aifs;
+ conf_ac->tx_op_limit = params->txop << 5;
+
+ conf_tid->queue_id = wl1271_tx_get_queue(queue);
+ conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
+ conf_tid->tsid = wl1271_tx_get_queue(queue);
+ conf_tid->ps_scheme = ps_scheme;
+ conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
+ conf_tid->apsd_conf[0] = 0;
+ conf_tid->apsd_conf[1] = 0;
+ } else {
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ /*
+		 * the txop is configured by mac80211 in units of 32us;
+		 * we need microseconds
+ */
+ ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+ params->cw_min, params->cw_max,
+ params->aifs, params->txop << 5);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+ CONF_CHANNEL_TYPE_EDCF,
+ wl1271_tx_get_queue(queue),
+ ps_scheme, CONF_ACK_POLICY_LEGACY,
+ 0, 0);
+ if (ret < 0)
+ goto out_sleep;
out_sleep:
- wl1271_ps_elp_sleep(wl);
+ wl1271_ps_elp_sleep(wl);
+ }
out:
mutex_unlock(&wl->mutex);
@@ -2247,6 +2616,173 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
return 0;
}
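+/*
+ * Each associated station is assigned a host link ID (HLID), tracked in
+ * ap_hlid_map and offset by WL1271_AP_STA_HLID_START; the lower HLIDs are
+ * used for the AP's own global and broadcast links.
+ */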
+static int wl1271_allocate_hlid(struct wl1271 *wl,
+ struct ieee80211_sta *sta,
+ u8 *hlid)
+{
+ struct wl1271_station *wl_sta;
+ int id;
+
+ id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
+ if (id >= AP_MAX_STATIONS) {
+ wl1271_warning("could not allocate HLID - too much stations");
+ return -EBUSY;
+ }
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+
+ __set_bit(id, wl->ap_hlid_map);
+ wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
+ *hlid = wl_sta->hlid;
+ return 0;
+}
+
+static void wl1271_free_hlid(struct wl1271 *wl, u8 hlid)
+{
+ int id = hlid - WL1271_AP_STA_HLID_START;
+
+ __clear_bit(id, wl->ap_hlid_map);
+}
+
+static int wl1271_op_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret = 0;
+ u8 hlid;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ goto out;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
+
+ ret = wl1271_allocate_hlid(wl, sta, &hlid);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_add_sta(wl, sta, hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl1271_station *wl_sta;
+ int ret = 0, id;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ goto out;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
+
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
+ if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid);
+ if (ret < 0)
+ goto out_sleep;
+
+ wl1271_free_hlid(wl, wl_sta->hlid);
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+ return ret;
+}
+
+int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (wl->ba_support) {
+ ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
+ true);
+ if (!ret)
+ wl->ba_rx_bitmap |= BIT(tid);
+ } else {
+ ret = -ENOTSUPP;
+ }
+ break;
+
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
+ if (!ret)
+ wl->ba_rx_bitmap &= ~BIT(tid);
+ break;
+
+ /*
+ * The BA initiator session is managed by the FW independently.
+ * Fall through here on purpose and reject all TX AMPDU commands.
+ */
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = -EINVAL;
+ break;
+
+ default:
+ wl1271_error("Incorrect ampdu action id=%x\n", action);
+ ret = -EINVAL;
+ }
+
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
{ .bitrate = 10,
@@ -2305,6 +2841,7 @@ static struct ieee80211_channel wl1271_channels[] = {
{ .hw_value = 11, .center_freq = 2462, .max_power = 25 },
{ .hw_value = 12, .center_freq = 2467, .max_power = 25 },
{ .hw_value = 13, .center_freq = 2472, .max_power = 25 },
+ { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
};
/* mapping to indexes for wl1271_rates */
@@ -2493,6 +3030,9 @@ static const struct ieee80211_ops wl1271_ops = {
.conf_tx = wl1271_op_conf_tx,
.get_tsf = wl1271_op_get_tsf,
.get_survey = wl1271_op_get_survey,
+ .sta_add = wl1271_op_sta_add,
+ .sta_remove = wl1271_op_sta_remove,
+ .ampdu_action = wl1271_op_ampdu_action,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -2607,6 +3147,18 @@ int wl1271_register_hw(struct wl1271 *wl)
if (wl->mac80211_registered)
return 0;
+ ret = wl1271_fetch_nvs(wl);
+ if (ret == 0) {
+ u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
+
+ wl->mac_addr[0] = nvs_ptr[11];
+ wl->mac_addr[1] = nvs_ptr[10];
+ wl->mac_addr[2] = nvs_ptr[6];
+ wl->mac_addr[3] = nvs_ptr[5];
+ wl->mac_addr[4] = nvs_ptr[4];
+ wl->mac_addr[5] = nvs_ptr[3];
+ }
+
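The reversed copy above suggests the NVS stores the address little-endian in two groups; a sketch of that assumed layout (an interpretation, not stated in the patch):

    /*
     * nvs[11], nvs[10]               -> mac_addr[0], mac_addr[1]
     * nvs[6], nvs[5], nvs[4], nvs[3] -> mac_addr[2] .. mac_addr[5]
     * i.e. each group holds its bytes in reverse (little-endian) order.
     */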
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = ieee80211_register_hw(wl->hw);
@@ -2629,6 +3181,9 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw);
void wl1271_unregister_hw(struct wl1271 *wl)
{
+ if (wl->state == WL1271_STATE_PLT)
+ __wl1271_plt_stop(wl);
+
unregister_netdevice_notifier(&wl1271_dev_notifier);
ieee80211_unregister_hw(wl->hw);
wl->mac80211_registered = false;
@@ -2667,7 +3222,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
wl->hw->wiphy->max_scan_ssids = 1;
/*
* Maximum length of elements in scanning probe request templates
@@ -2676,8 +3231,20 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
*/
wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
- wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
+
+ /*
+ * We keep local copies of the band structs because we need to
+ * modify them on a per-device basis.
+ */
+ memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+ sizeof(wl1271_band_2ghz));
+ memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+ sizeof(wl1271_band_5ghz));
+
+ wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &wl->bands[IEEE80211_BAND_2GHZ];
+ wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &wl->bands[IEEE80211_BAND_5GHZ];
wl->hw->queues = 4;
wl->hw->max_rates = 1;
@@ -2686,6 +3253,10 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ wl->hw->sta_data_size = sizeof(struct wl1271_station);
+
+ wl->hw->max_rx_aggregation_subframes = 8;
+
return 0;
}
EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
@@ -2735,8 +3306,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
wl->default_key = 0;
wl->rx_counter = 0;
- wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
- wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
+ wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
@@ -2748,6 +3319,9 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
wl->flags = 0;
wl->sg_enabled = true;
wl->hw_pg_ver = -1;
+ wl->bss_type = MAX_BSS_TYPE;
+ wl->set_bss_type = MAX_BSS_TYPE;
+ wl->fw_bss_type = MAX_BSS_TYPE;
memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
@@ -2837,9 +3411,9 @@ int wl1271_free_hw(struct wl1271 *wl)
}
EXPORT_SYMBOL_GPL(wl1271_free_hw);
-u32 wl12xx_debug_level;
+u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
-module_param_named(debug_level, wl12xx_debug_level, uint, DEBUG_NONE);
+module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 682304c..b0c6ddc 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -76,7 +76,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
*/
wl->noise = desc->rssi - (desc->snr >> 1);
- status->freq = ieee80211_channel_to_frequency(desc->channel);
+ status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band);
if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
@@ -198,6 +198,16 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
pkt_offset += pkt_length;
}
}
- wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS,
- cpu_to_le32(wl->rx_counter));
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+}
+
+void wl1271_set_default_filters(struct wl1271 *wl)
+{
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
+ } else {
+ wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
+ }
}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index 3abb26f..8d048b3 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -86,8 +86,9 @@
/*
* RX Descriptor status
*
- * Bits 0-2 - status
- * Bits 3-7 - reserved
+ * Bits 0-2 - error code
+ * Bits 3-5 - process_id tag (AP mode FW)
+ * Bits 6-7 - reserved
*/
#define WL1271_RX_DESC_STATUS_MASK 0x07
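A minimal sketch of extracting the fields described above; the process_id mask is inferred from the stated bit layout and is not defined in this hunk:

    #define WL1271_RX_DESC_PROC_ID_MASK 0x38    /* bits 3-5 (assumed) */

    static inline u8 wl1271_rx_desc_error(u8 status)
    {
            return status & WL1271_RX_DESC_STATUS_MASK;   /* bits 0-2 */
    }

    static inline u8 wl1271_rx_desc_proc_id(u8 status)
    {
            return (status & WL1271_RX_DESC_PROC_ID_MASK) >> 3;
    }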
@@ -110,12 +111,16 @@ struct wl1271_rx_descriptor {
u8 snr;
__le32 timestamp;
u8 packet_class;
- u8 process_id;
+ union {
+ u8 process_id; /* STA FW */
+ u8 hlid; /* AP FW */
+ } __packed;
u8 pad_len;
u8 reserved;
} __packed;
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+void wl1271_set_default_filters(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 93cbb8d..d5e8748 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -345,3 +345,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 7145ea5..0132dad 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -110,6 +110,7 @@ static void wl1271_spi_reset(struct wl1271 *wl)
spi_message_add_tail(&t, &m);
spi_sync(wl_to_spi(wl), &m);
+
wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
@@ -494,4 +495,5 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL1271_FW_NAME);
+MODULE_FIRMWARE(WL1271_AP_FW_NAME);
MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index b44c75c..3507c81 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/etherdevice.h>
#include "wl12xx.h"
#include "io.h"
@@ -30,6 +31,23 @@
#include "ps.h"
#include "tx.h"
+static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+{
+ int ret;
+ bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+
+ if (is_ap)
+ ret = wl1271_cmd_set_ap_default_wep_key(wl, id);
+ else
+ ret = wl1271_cmd_set_sta_default_wep_key(wl, id);
+
+ if (ret < 0)
+ return ret;
+
+ wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
+ return 0;
+}
+
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
int id;
@@ -99,7 +117,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
{
struct timespec ts;
struct wl1271_tx_hw_descr *desc;
- int pad, ac;
+ int pad, ac, rate_idx;
s64 hosttime;
u16 tx_attr;
@@ -117,7 +135,11 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
getnstimeofday(&ts);
hosttime = (timespec_to_ns(&ts) >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
- desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS)
+ desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
+ else
+ desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
/* configure the tx attributes */
tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
@@ -125,7 +147,41 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
/* queue (we use the same identifiers for tids and acs) */
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
desc->tid = ac;
- desc->aid = TX_HW_DEFAULT_AID;
+
+ if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ desc->aid = TX_HW_DEFAULT_AID;
+
+ /* if the packets are destined for the AP (i.e. have a STA
+ entry), send them with the AP rate policies; otherwise use
+ the default basic rates */
+ if (control->control.sta)
+ rate_idx = ACX_TX_AP_FULL_RATE;
+ else
+ rate_idx = ACX_TX_BASIC_RATE;
+ } else {
+ if (control->control.sta) {
+ struct wl1271_station *wl_sta;
+
+ wl_sta = (struct wl1271_station *)
+ control->control.sta->drv_priv;
+ desc->hlid = wl_sta->hlid;
+ rate_idx = ac;
+ } else {
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)
+ (skb->data + sizeof(*desc));
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ desc->hlid = WL1271_AP_GLOBAL_HLID;
+ rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
+ } else {
+ desc->hlid = WL1271_AP_BROADCAST_HLID;
+ rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+ }
+ }
+ }
+
+ tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
desc->reserved = 0;
/* align the length (and store in terms of words) */
@@ -136,14 +192,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
pad = pad - skb->len;
tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
- /* if the packets are destined for AP (have a STA entry) send them
- with AP rate policies, otherwise use default basic rates */
- if (control->control.sta)
- tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
-
desc->tx_attr = cpu_to_le16(tx_attr);
- wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
+ wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
+ "tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid,
+ le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length),
+ le16_to_cpu(desc->life_time), desc->total_mem_blocks);
}
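To summarize the branching added to wl1271_tx_fill_hdr above, the HLID and rate-policy selection works out as follows (a recap of the code, not new behaviour):

    /*
     * STA, sta entry    -> aid = TX_HW_DEFAULT_AID, ACX_TX_AP_FULL_RATE
     * STA, no sta entry -> aid = TX_HW_DEFAULT_AID, ACX_TX_BASIC_RATE
     * AP,  sta entry    -> hlid = per-station HLID, per-AC policy (ac)
     * AP,  mgmt frame   -> hlid = WL1271_AP_GLOBAL_HLID,
     *                      ACX_TX_AP_MODE_MGMT_RATE
     * AP,  other frame  -> hlid = WL1271_AP_BROADCAST_HLID,
     *                      ACX_TX_AP_MODE_BCST_RATE
     */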
/* caller must hold wl->mutex */
@@ -153,7 +207,6 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
struct ieee80211_tx_info *info;
u32 extra = 0;
int ret = 0;
- u8 idx;
u32 total_len;
if (!skb)
@@ -166,11 +219,15 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
extra = WL1271_TKIP_IV_SPACE;
if (info->control.hw_key) {
- idx = info->control.hw_key->hw_key_idx;
+ bool is_wep;
+ u8 idx = info->control.hw_key->hw_key_idx;
+ u32 cipher = info->control.hw_key->cipher;
+
+ is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
+ (cipher == WLAN_CIPHER_SUITE_WEP104);
- /* FIXME: do we have to do this if we're not using WEP? */
- if (unlikely(wl->default_key != idx)) {
- ret = wl1271_cmd_set_default_wep_key(wl, idx);
+ if (unlikely(is_wep && wl->default_key != idx)) {
+ ret = wl1271_set_default_wep_key(wl, idx);
if (ret < 0)
return ret;
wl->default_key = idx;
@@ -303,7 +360,7 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
woken_up = true;
wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
- wl1271_acx_rate_policies(wl);
+ wl1271_acx_sta_rate_policies(wl);
}
while ((skb = wl1271_skb_dequeue(wl))) {
@@ -521,3 +578,21 @@ void wl1271_tx_flush(struct wl1271 *wl)
wl1271_warning("Unable to flush all TX buffers, timed out.");
}
+
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl)
+{
+ int i;
+ u32 rate = 0;
+
+ if (!wl->basic_rate_set) {
+ WARN_ON(1);
+ wl->basic_rate_set = wl->conf.tx.basic_rate;
+ }
+
+ for (i = 0; !rate; i++) {
+ if ((wl->basic_rate_set >> i) & 0x1)
+ rate = 1 << i;
+ }
+
+ return rate;
+}
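The loop above isolates the lowest set bit of basic_rate_set; a minimal equivalent sketch using the kernel bit helpers, assuming the rate set is non-zero after the WARN_ON fallback:

    /* ffs() is 1-based, so BIT(ffs(x) - 1) is the lowest set bit.
     * E.g. basic_rate_set == 0x150 -> ffs() == 5 -> rate == BIT(4) == 0x10.
     */
    rate = BIT(ffs(wl->basic_rate_set) - 1);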
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 903e5dc..05722a5 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -29,6 +29,7 @@
#define TX_HW_BLOCK_SIZE 252
#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
+#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
/* The chipset reference driver states that the "aid" value 1
* is for infra-BSS, but it is still always used */
#define TX_HW_DEFAULT_AID 1
@@ -77,8 +78,12 @@ struct wl1271_tx_hw_descr {
u8 id;
/* The packet TID value (as User-Priority) */
u8 tid;
- /* Identifier of the remote STA in IBSS, 1 in infra-BSS */
- u8 aid;
+ union {
+ /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */
+ u8 aid;
+ /* AP - host link ID (HLID) */
+ u8 hlid;
+ } __packed;
u8 reserved;
} __packed;
@@ -146,5 +151,6 @@ void wl1271_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
+u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 9050dd9..d1de13f 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -38,6 +38,13 @@
#define DRIVER_NAME "wl1271"
#define DRIVER_PREFIX DRIVER_NAME ": "
+/*
+ * FW versions that support 11n BA are marked
+ * x.x.x.50-60.x
+ */
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
+#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
+
enum {
DEBUG_NONE = 0,
DEBUG_IRQ = BIT(0),
@@ -57,6 +64,8 @@ enum {
DEBUG_SDIO = BIT(14),
DEBUG_FILTERS = BIT(15),
DEBUG_ADHOC = BIT(16),
+ DEBUG_AP = BIT(17),
+ DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
DEBUG_ALL = ~0,
};
@@ -103,16 +112,27 @@ extern u32 wl12xx_debug_level;
true); \
} while (0)
-#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
+#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \
CFG_BSSID_FILTER_EN | \
CFG_MC_FILTER_EN)
-#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
+#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
+#define WL1271_DEFAULT_AP_RX_CONFIG 0
+
+#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
+ CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
+ CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
+ CFG_RX_ASSOC_EN)
+
#define WL1271_FW_NAME "wl1271-fw.bin"
+#define WL1271_AP_FW_NAME "wl1271-fw-ap.bin"
+
#define WL1271_NVS_NAME "wl1271-nvs.bin"
#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
@@ -129,6 +149,14 @@ extern u32 wl12xx_debug_level;
#define WL1271_DEFAULT_BEACON_INT 100
#define WL1271_DEFAULT_DTIM_PERIOD 1
+#define WL1271_AP_GLOBAL_HLID 0
+#define WL1271_AP_BROADCAST_HLID 1
+#define WL1271_AP_STA_HLID_START 2
+
+#define WL1271_AP_BSS_INDEX 0
+#define WL1271_AP_DEF_INACTIV_SEC 300
+#define WL1271_AP_DEF_BEACON_EXP 20
+
#define ACX_TX_DESCRIPTORS 32
#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
@@ -161,10 +189,13 @@ struct wl1271_partition_set {
struct wl1271;
+#define WL12XX_NUM_FW_VER 5
+
/* FIXME: I'm not sure about this structure name */
struct wl1271_chip {
u32 id;
- char fw_ver[21];
+ char fw_ver_str[ETHTOOL_BUSINFO_LEN];
+ unsigned int fw_ver[WL12XX_NUM_FW_VER];
};
struct wl1271_stats {
@@ -178,6 +209,11 @@ struct wl1271_stats {
#define NUM_TX_QUEUES 4
#define NUM_RX_PKT_DESC 8
+#define AP_MAX_STATIONS 5
+
+/* Broadcast and Global links + links to stations */
+#define AP_MAX_LINKS (AP_MAX_STATIONS + 2)
+
/* FW status registers */
struct wl1271_fw_status {
__le32 intr;
@@ -188,7 +224,18 @@ struct wl1271_fw_status {
__le32 rx_pkt_descs[NUM_RX_PKT_DESC];
__le32 tx_released_blks[NUM_TX_QUEUES];
__le32 fw_localtime;
- __le32 padding[2];
+
+ /* The following fields are valid only in the AP FW */
+
+ /*
+ * A bitmap (one bit per HLID) indicating whether the
+ * corresponding station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /* Number of freed MBs per HLID */
+ u8 tx_lnk_free_blks[AP_MAX_LINKS];
+ u8 padding_1[1];
} __packed;
struct wl1271_rx_mem_pool_addr {
@@ -218,6 +265,19 @@ struct wl1271_if_operations {
void (*disable_irq)(struct wl1271 *wl);
};
+#define MAX_NUM_KEYS 14
+#define MAX_KEY_SIZE 32
+
+struct wl1271_ap_key {
+ u8 id;
+ u8 key_type;
+ u8 key_size;
+ u8 key[MAX_KEY_SIZE];
+ u8 hlid;
+ u32 tx_seq_32;
+ u16 tx_seq_16;
+};
+
struct wl1271 {
struct platform_device *plat_dev;
struct ieee80211_hw *hw;
@@ -251,6 +311,7 @@ struct wl1271 {
#define WL1271_FLAG_PSPOLL_FAILURE (12)
#define WL1271_FLAG_STA_STATE_SENT (13)
#define WL1271_FLAG_FW_TX_BUSY (14)
+#define WL1271_FLAG_AP_STARTED (15)
unsigned long flags;
struct wl1271_partition_set part;
@@ -262,6 +323,7 @@ struct wl1271 {
u8 *fw;
size_t fw_len;
+ u8 fw_bss_type;
struct wl1271_nvs_file *nvs;
size_t nvs_len;
@@ -378,7 +440,6 @@ struct wl1271 {
int last_rssi_event;
struct wl1271_stats stats;
- struct dentry *rootdir;
__le32 buffer_32;
u32 buffer_cmd;
@@ -400,6 +461,23 @@ struct wl1271 {
/* Most recently reported noise in dBm */
s8 noise;
+
+ /* map for HLIDs of associated stations - when operating in AP mode */
+ unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
+
+ /* recorded keys for AP mode - set here before AP startup */
+ struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
+
+ /* bands supported by this instance of wl12xx */
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* RX BA constraint value */
+ bool ba_support;
+ u8 ba_rx_bitmap;
+};
+
+struct wl1271_station {
+ u8 hlid;
};
int wl1271_plt_start(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index be21032..67dcf8f 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -138,13 +138,13 @@ struct wl12xx_arp_rsp_template {
struct ieee80211_hdr_3addr hdr;
u8 llc_hdr[sizeof(rfc1042_header)];
- u16 llc_type;
+ __be16 llc_type;
struct arphdr arp_hdr;
u8 sender_hw[ETH_ALEN];
- u32 sender_ip;
+ __be32 sender_ip;
u8 target_hw[ETH_ALEN];
- u32 target_ip;
+ __be32 target_ip;
} __packed;
@@ -160,4 +160,9 @@ struct wl12xx_probe_resp_template {
struct wl12xx_ie_country country;
} __packed;
+struct wl12xx_disconn_template {
+ struct ieee80211_header header;
+ __le16 disconn_reason;
+} __packed;
+
#endif
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 6a9b660..54f68f1 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -108,25 +108,17 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
{
int r;
int i;
- zd_addr_t *a16;
- u16 *v16;
+ zd_addr_t a16[USB_MAX_IOREAD32_COUNT * 2];
+ u16 v16[USB_MAX_IOREAD32_COUNT * 2];
unsigned int count16;
if (count > USB_MAX_IOREAD32_COUNT)
return -EINVAL;
- /* Allocate a single memory block for values and addresses. */
- count16 = 2*count;
- /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
- a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
- GFP_KERNEL);
- if (!a16) {
- dev_dbg_f(zd_chip_dev(chip),
- "error ENOMEM in allocation of a16\n");
- r = -ENOMEM;
- goto out;
- }
- v16 = (u16 *)(a16 + count16);
+ /* Use stack for values and addresses. */
+ count16 = 2 * count;
+ BUG_ON(count16 * sizeof(zd_addr_t) > sizeof(a16));
+ BUG_ON(count16 * sizeof(u16) > sizeof(v16));
for (i = 0; i < count; i++) {
int j = 2*i;
@@ -139,7 +131,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
if (r) {
dev_dbg_f(zd_chip_dev(chip),
"error: zd_ioread16v_locked. Error number %d\n", r);
- goto out;
+ return r;
}
for (i = 0; i < count; i++) {
@@ -147,18 +139,18 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
values[i] = (v16[j] << 16) | v16[j+1];
}
-out:
- kfree((void *)a16);
- return r;
+ return 0;
}
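Because count is already capped at USB_MAX_IOREAD32_COUNT, the BUG_ON checks above can never fire; a sketch of the compile-time alternative, placed inside the function so sizeof(a16) is in scope (an editorial suggestion, not part of the patch):

    BUILD_BUG_ON(USB_MAX_IOREAD32_COUNT * 2 * sizeof(zd_addr_t)
                 > sizeof(a16));
    BUILD_BUG_ON(USB_MAX_IOREAD32_COUNT * 2 * sizeof(u16) > sizeof(v16));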
int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
unsigned int count)
{
int i, j, r;
- struct zd_ioreq16 *ioreqs16;
+ struct zd_ioreq16 ioreqs16[USB_MAX_IOWRITE32_COUNT * 2];
unsigned int count16;
+ /* Use stack for values and addresses. */
+
ZD_ASSERT(mutex_is_locked(&chip->mutex));
if (count == 0)
@@ -166,15 +158,8 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
if (count > USB_MAX_IOWRITE32_COUNT)
return -EINVAL;
- /* Allocate a single memory block for values and addresses. */
- count16 = 2*count;
- ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_KERNEL);
- if (!ioreqs16) {
- r = -ENOMEM;
- dev_dbg_f(zd_chip_dev(chip),
- "error %d in ioreqs16 allocation\n", r);
- goto out;
- }
+ count16 = 2 * count;
+ BUG_ON(count16 * sizeof(struct zd_ioreq16) > sizeof(ioreqs16));
for (i = 0; i < count; i++) {
j = 2*i;
@@ -192,8 +177,6 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs,
"error %d in zd_usb_write16v\n", r);
}
#endif /* DEBUG */
-out:
- kfree(ioreqs16);
return r;
}
@@ -370,16 +353,12 @@ error:
return r;
}
-/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
- * CR_MAC_ADDR_P2 must be overwritten
- */
-int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+static int zd_write_mac_addr_common(struct zd_chip *chip, const u8 *mac_addr,
+ const struct zd_ioreq32 *in_reqs,
+ const char *type)
{
int r;
- struct zd_ioreq32 reqs[2] = {
- [0] = { .addr = CR_MAC_ADDR_P1 },
- [1] = { .addr = CR_MAC_ADDR_P2 },
- };
+ struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};
if (mac_addr) {
reqs[0].value = (mac_addr[3] << 24)
@@ -388,9 +367,9 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
| mac_addr[0];
reqs[1].value = (mac_addr[5] << 8)
| mac_addr[4];
- dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr);
+ dev_dbg_f(zd_chip_dev(chip), "%s addr %pM\n", type, mac_addr);
} else {
- dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n");
+ dev_dbg_f(zd_chip_dev(chip), "set NULL %s\n", type);
}
mutex_lock(&chip->mutex);
@@ -399,6 +378,29 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
return r;
}
+/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and
+ * CR_MAC_ADDR_P2 must be overwritten
+ */
+int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
+{
+ static const struct zd_ioreq32 reqs[2] = {
+ [0] = { .addr = CR_MAC_ADDR_P1 },
+ [1] = { .addr = CR_MAC_ADDR_P2 },
+ };
+
+ return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");
+}
+
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid)
+{
+ static const struct zd_ioreq32 reqs[2] = {
+ [0] = { .addr = CR_BSSID_P1 },
+ [1] = { .addr = CR_BSSID_P2 },
+ };
+
+ return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");
+}
+
int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain)
{
int r;
@@ -849,11 +851,12 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
{
struct zd_ioreq32 reqs[3];
+ u16 b_interval = s->beacon_interval & 0xffff;
- if (s->beacon_interval <= 5)
- s->beacon_interval = 5;
- if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval)
- s->pre_tbtt = s->beacon_interval - 1;
+ if (b_interval <= 5)
+ b_interval = 5;
+ if (s->pre_tbtt < 4 || s->pre_tbtt >= b_interval)
+ s->pre_tbtt = b_interval - 1;
if (s->atim_wnd_period >= s->pre_tbtt)
s->atim_wnd_period = s->pre_tbtt - 1;
@@ -862,31 +865,57 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s)
reqs[1].addr = CR_PRE_TBTT;
reqs[1].value = s->pre_tbtt;
reqs[2].addr = CR_BCN_INTERVAL;
- reqs[2].value = s->beacon_interval;
+ reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval;
return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
}
-static int set_beacon_interval(struct zd_chip *chip, u32 interval)
+static int set_beacon_interval(struct zd_chip *chip, u16 interval,
+ u8 dtim_period, int type)
{
int r;
struct aw_pt_bi s;
+ u32 b_interval, mode_flag;
ZD_ASSERT(mutex_is_locked(&chip->mutex));
+
+ if (interval > 0) {
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
+ mode_flag = BCN_MODE_IBSS;
+ break;
+ case NL80211_IFTYPE_AP:
+ mode_flag = BCN_MODE_AP;
+ break;
+ default:
+ mode_flag = 0;
+ break;
+ }
+ } else {
+ dtim_period = 0;
+ mode_flag = 0;
+ }
+
+ b_interval = mode_flag | (dtim_period << 16) | interval;
+
+ r = zd_iowrite32_locked(chip, b_interval, CR_BCN_INTERVAL);
+ if (r)
+ return r;
r = get_aw_pt_bi(chip, &s);
if (r)
return r;
- s.beacon_interval = interval;
return set_aw_pt_bi(chip, &s);
}
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval)
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+ int type)
{
int r;
mutex_lock(&chip->mutex);
- r = set_beacon_interval(chip, interval);
+ r = set_beacon_interval(chip, interval, dtim_period, type);
mutex_unlock(&chip->mutex);
return r;
}
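set_beacon_interval above packs three fields into CR_BCN_INTERVAL; a worked example of the layout implied by the shifts and the BCN_MODE_* flags (bits 0-15 interval, bits 16-23 DTIM period, bits 24+ mode):

    /* AP mode, DTIM period 2, beacon interval 100 TU: */
    u32 val = BCN_MODE_AP | (2 << 16) | 100;
    /*     == 0x01000000 | 0x00020000 | 0x00000064 == 0x01020064 */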
@@ -905,7 +934,7 @@ static int hw_init(struct zd_chip *chip)
if (r)
return r;
- return set_beacon_interval(chip, 100);
+ return set_beacon_interval(chip, 100, 0, NL80211_IFTYPE_UNSPECIFIED);
}
static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset)
@@ -1407,6 +1436,9 @@ void zd_chip_disable_int(struct zd_chip *chip)
mutex_lock(&chip->mutex);
zd_usb_disable_int(&chip->usb);
mutex_unlock(&chip->mutex);
+
+ /* cancel pending interrupt work */
+ cancel_work_sync(&zd_chip_to_mac(chip)->process_intr);
}
int zd_chip_enable_rxtx(struct zd_chip *chip)
@@ -1416,6 +1448,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
mutex_lock(&chip->mutex);
zd_usb_enable_tx(&chip->usb);
r = zd_usb_enable_rx(&chip->usb);
+ zd_tx_watchdog_enable(&chip->usb);
mutex_unlock(&chip->mutex);
return r;
}
@@ -1423,6 +1456,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip)
void zd_chip_disable_rxtx(struct zd_chip *chip)
{
mutex_lock(&chip->mutex);
+ zd_tx_watchdog_disable(&chip->usb);
zd_usb_disable_rx(&chip->usb);
zd_usb_disable_tx(&chip->usb);
mutex_unlock(&chip->mutex);
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index f8bbf7d..14e4402 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -546,6 +546,7 @@ enum {
#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \
RX_FILTER_CFEND | RX_FILTER_CFACK)
+#define BCN_MODE_AP 0x1000000
#define BCN_MODE_IBSS 0x2000000
/* Monitor mode sets filter to 0xfffff */
@@ -881,6 +882,7 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip)
u8 zd_chip_get_channel(struct zd_chip *chip);
int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain);
int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr);
+int zd_write_bssid(struct zd_chip *chip, const u8 *bssid);
int zd_chip_switch_radio_on(struct zd_chip *chip);
int zd_chip_switch_radio_off(struct zd_chip *chip);
int zd_chip_enable_int(struct zd_chip *chip);
@@ -920,7 +922,8 @@ enum led_status {
int zd_chip_control_leds(struct zd_chip *chip, enum led_status status);
-int zd_set_beacon_interval(struct zd_chip *chip, u32 interval);
+int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period,
+ int type);
static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
{
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6107304..74a269e 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -138,6 +138,12 @@ static const struct ieee80211_channel zd_channels[] = {
static void housekeeping_init(struct zd_mac *mac);
static void housekeeping_enable(struct zd_mac *mac);
static void housekeeping_disable(struct zd_mac *mac);
+static void beacon_init(struct zd_mac *mac);
+static void beacon_enable(struct zd_mac *mac);
+static void beacon_disable(struct zd_mac *mac);
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble);
+static int zd_mac_config_beacon(struct ieee80211_hw *hw,
+ struct sk_buff *beacon);
static int zd_reg2alpha2(u8 regdomain, char *alpha2)
{
@@ -231,6 +237,26 @@ static int set_rx_filter(struct zd_mac *mac)
return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter);
}
+static int set_mac_and_bssid(struct zd_mac *mac)
+{
+ int r;
+
+ if (!mac->vif)
+ return -1;
+
+ r = zd_write_mac_addr(&mac->chip, mac->vif->addr);
+ if (r)
+ return r;
+
+ /* After setting the MAC address, the vendor driver sets the
+ * BSSID in AP mode or the RX filter in the other modes.
+ */
+ if (mac->type != NL80211_IFTYPE_AP)
+ return set_rx_filter(mac);
+ else
+ return zd_write_bssid(&mac->chip, mac->vif->addr);
+}
+
static int set_mc_hash(struct zd_mac *mac)
{
struct zd_mc_hash hash;
@@ -238,7 +264,7 @@ static int set_mc_hash(struct zd_mac *mac)
return zd_chip_set_multicast_hash(&mac->chip, &hash);
}
-static int zd_op_start(struct ieee80211_hw *hw)
+int zd_op_start(struct ieee80211_hw *hw)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_chip *chip = &mac->chip;
@@ -275,6 +301,8 @@ static int zd_op_start(struct ieee80211_hw *hw)
goto disable_rxtx;
housekeeping_enable(mac);
+ beacon_enable(mac);
+ set_bit(ZD_DEVICE_RUNNING, &mac->flags);
return 0;
disable_rxtx:
zd_chip_disable_rxtx(chip);
@@ -286,19 +314,22 @@ out:
return r;
}
-static void zd_op_stop(struct ieee80211_hw *hw)
+void zd_op_stop(struct ieee80211_hw *hw)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_chip *chip = &mac->chip;
struct sk_buff *skb;
struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue;
+ clear_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
/* The order here deliberately is a little different from the open()
* method, since we need to make sure there is no opportunity for RX
* frames to be processed by mac80211 after we have stopped it.
*/
zd_chip_disable_rxtx(chip);
+ beacon_disable(mac);
housekeeping_disable(mac);
flush_workqueue(zd_workqueue);
@@ -311,6 +342,68 @@ static void zd_op_stop(struct ieee80211_hw *hw)
dev_kfree_skb_any(skb);
}
+int zd_restore_settings(struct zd_mac *mac)
+{
+ struct sk_buff *beacon;
+ struct zd_mc_hash multicast_hash;
+ unsigned int short_preamble;
+ int r, beacon_interval, beacon_period;
+ u8 channel;
+
+ dev_dbg_f(zd_mac_dev(mac), "\n");
+
+ spin_lock_irq(&mac->lock);
+ multicast_hash = mac->multicast_hash;
+ short_preamble = mac->short_preamble;
+ beacon_interval = mac->beacon.interval;
+ beacon_period = mac->beacon.period;
+ channel = mac->channel;
+ spin_unlock_irq(&mac->lock);
+
+ r = set_mac_and_bssid(mac);
+ if (r < 0) {
+ dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r);
+ return r;
+ }
+
+ r = zd_chip_set_channel(&mac->chip, channel);
+ if (r < 0) {
+ dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n",
+ r);
+ return r;
+ }
+
+ set_rts_cts(mac, short_preamble);
+
+ r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash);
+ if (r < 0) {
+ dev_dbg_f(zd_mac_dev(mac),
+ "zd_chip_set_multicast_hash failed, %d\n", r);
+ return r;
+ }
+
+ if (mac->type == NL80211_IFTYPE_MESH_POINT ||
+ mac->type == NL80211_IFTYPE_ADHOC ||
+ mac->type == NL80211_IFTYPE_AP) {
+ if (mac->vif != NULL) {
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ zd_mac_config_beacon(mac->hw, beacon);
+ kfree_skb(beacon);
+ }
+ }
+
+ zd_set_beacon_interval(&mac->chip, beacon_interval,
+ beacon_period, mac->type);
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+ }
+
+ return 0;
+}
+
/**
* zd_mac_tx_status - reports tx status of a packet if required
* @hw - a &struct ieee80211_hw pointer
@@ -574,64 +667,120 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
{
struct zd_mac *mac = zd_hw_mac(hw);
- int r;
+ int r, ret, num_cmds, req_pos = 0;
u32 tmp, j = 0;
/* 4 more bytes for tail CRC */
u32 full_len = beacon->len + 4;
+ unsigned long end_jiffies, message_jiffies;
+ struct zd_ioreq32 *ioreqs;
+
+ /* Alloc memory for full beacon write at once. */
+ num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len;
+ ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL);
+ if (!ioreqs)
+ return -ENOMEM;
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0);
+ mutex_lock(&mac->chip.mutex);
+
+ r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
- return r;
- r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+ goto out;
+ r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
- return r;
+ goto release_sema;
+ end_jiffies = jiffies + HZ / 2; /*~500ms*/
+ message_jiffies = jiffies + HZ / 10; /*~100ms*/
while (tmp & 0x2) {
- r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
+ r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE);
if (r < 0)
- return r;
- if ((++j % 100) == 0) {
- printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n");
- if (j >= 500) {
- printk(KERN_ERR "Giving up beacon config.\n");
- return -ETIMEDOUT;
+ goto release_sema;
+ if (time_is_before_eq_jiffies(message_jiffies)) {
+ message_jiffies = jiffies + HZ / 10;
+ dev_err(zd_mac_dev(mac),
+ "CR_BCN_FIFO_SEMAPHORE not ready\n");
+ if (time_is_before_eq_jiffies(end_jiffies)) {
+ dev_err(zd_mac_dev(mac),
+ "Giving up beacon config.\n");
+ r = -ETIMEDOUT;
+ goto reset_device;
}
}
- msleep(1);
+ msleep(20);
}
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1);
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_FIFO;
+ ioreqs[req_pos].value = full_len - 1;
+ req_pos++;
if (zd_chip_is_zd1211b(&mac->chip)) {
- r = zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1);
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_LENGTH;
+ ioreqs[req_pos].value = full_len - 1;
+ req_pos++;
}
for (j = 0 ; j < beacon->len; j++) {
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO,
- *((u8 *)(beacon->data + j)));
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_FIFO;
+ ioreqs[req_pos].value = *((u8 *)(beacon->data + j));
+ req_pos++;
}
for (j = 0; j < 4; j++) {
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0);
- if (r < 0)
- return r;
+ ioreqs[req_pos].addr = CR_BCN_FIFO;
+ ioreqs[req_pos].value = 0x0;
+ req_pos++;
}
- r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1);
- if (r < 0)
- return r;
+ BUG_ON(req_pos != num_cmds);
+
+ r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds);
+
+release_sema:
+ /*
+ * Try very hard to release the device beacon semaphore, as otherwise
+ * the device/driver can be left in an unusable state.
+ */
+ end_jiffies = jiffies + HZ / 2; /*~500ms*/
+ ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+ while (ret < 0) {
+ if (time_is_before_eq_jiffies(end_jiffies)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ msleep(20);
+ ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE);
+ }
+
+ if (ret < 0)
+ dev_err(zd_mac_dev(mac), "Could not release "
+ "CR_BCN_FIFO_SEMAPHORE!\n");
+ if (r < 0 || ret < 0) {
+ if (r >= 0)
+ r = ret;
+ goto out;
+ }
/* 802.11b/g 2.4G CCK 1Mb
* 802.11a, not yet implemented, uses different values (see GPL vendor
* driver)
*/
- return zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 |
- (full_len << 19));
+ r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19),
+ CR_BCN_PLCP_CFG);
+out:
+ mutex_unlock(&mac->chip.mutex);
+ kfree(ioreqs);
+ return r;
+
+reset_device:
+ mutex_unlock(&mac->chip.mutex);
+ kfree(ioreqs);
+
+ /* semaphore stuck, reset device to avoid fw freeze later */
+ dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, "
+ "reseting device...");
+ usb_queue_reset_device(mac->chip.usb.intf);
+
+ return r;
}
static int fill_ctrlset(struct zd_mac *mac,
@@ -779,6 +928,13 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
mac->ack_pending = 1;
mac->ack_signal = stats->signal;
+
+ /* In AP mode, complete the pending tx packet immediately */
+ if (mac->type == NL80211_IFTYPE_AP) {
+ skb = __skb_dequeue(q);
+ zd_mac_tx_status(hw, skb, mac->ack_signal, NULL);
+ mac->ack_pending = 0;
+ }
}
spin_unlock_irqrestore(&q->lock, flags);
@@ -882,13 +1038,16 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
mac->type = vif->type;
break;
default:
return -EOPNOTSUPP;
}
- return zd_write_mac_addr(&mac->chip, vif->addr);
+ mac->vif = vif;
+
+ return set_mac_and_bssid(mac);
}
static void zd_op_remove_interface(struct ieee80211_hw *hw,
@@ -896,7 +1055,8 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
{
struct zd_mac *mac = zd_hw_mac(hw);
mac->type = NL80211_IFTYPE_UNSPECIFIED;
- zd_set_beacon_interval(&mac->chip, 0);
+ mac->vif = NULL;
+ zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED);
zd_write_mac_addr(&mac->chip, NULL);
}
@@ -905,49 +1065,67 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
struct zd_mac *mac = zd_hw_mac(hw);
struct ieee80211_conf *conf = &hw->conf;
+ spin_lock_irq(&mac->lock);
+ mac->channel = conf->channel->hw_value;
+ spin_unlock_irq(&mac->lock);
+
return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
}
-static void zd_process_intr(struct work_struct *work)
+static void zd_beacon_done(struct zd_mac *mac)
{
- u16 int_status;
- struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
+ struct sk_buff *skb, *beacon;
- int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4));
- if (int_status & INT_CFG_NEXT_BCN)
- dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");
- else
- dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
-
- zd_chip_enable_hwint(&mac->chip);
-}
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ return;
+ if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP)
+ return;
+ /*
+ * Send out buffered broad- and multicast frames.
+ */
+ while (!ieee80211_queue_stopped(mac->hw, 0)) {
+ skb = ieee80211_get_buffered_bc(mac->hw, mac->vif);
+ if (!skb)
+ break;
+ zd_op_tx(mac->hw, skb);
+ }
-static void set_multicast_hash_handler(struct work_struct *work)
-{
- struct zd_mac *mac =
- container_of(work, struct zd_mac, set_multicast_hash_work);
- struct zd_mc_hash hash;
+ /*
+ * Fetch next beacon so that tim_count is updated.
+ */
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ zd_mac_config_beacon(mac->hw, beacon);
+ kfree_skb(beacon);
+ }
spin_lock_irq(&mac->lock);
- hash = mac->multicast_hash;
+ mac->beacon.last_update = jiffies;
spin_unlock_irq(&mac->lock);
-
- zd_chip_set_multicast_hash(&mac->chip, &hash);
}
-static void set_rx_filter_handler(struct work_struct *work)
+static void zd_process_intr(struct work_struct *work)
{
- struct zd_mac *mac =
- container_of(work, struct zd_mac, set_rx_filter_work);
- int r;
+ u16 int_status;
+ unsigned long flags;
+ struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
- dev_dbg_f(zd_mac_dev(mac), "\n");
- r = set_rx_filter(mac);
- if (r)
- dev_err(zd_mac_dev(mac), "set_rx_filter_handler error %d\n", r);
+ spin_lock_irqsave(&mac->lock, flags);
+ int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4));
+ spin_unlock_irqrestore(&mac->lock, flags);
+
+ if (int_status & INT_CFG_NEXT_BCN) {
+ /*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/
+ zd_beacon_done(mac);
+ } else {
+ dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
+ }
+
+ zd_chip_enable_hwint(&mac->chip);
}
+
static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
@@ -979,6 +1157,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
};
struct zd_mac *mac = zd_hw_mac(hw);
unsigned long flags;
+ int r;
/* Only deal with supported flags */
changed_flags &= SUPPORTED_FIF_FLAGS;
@@ -1000,11 +1179,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
mac->multicast_hash = hash;
spin_unlock_irqrestore(&mac->lock, flags);
- /* XXX: these can be called here now, can sleep now! */
- queue_work(zd_workqueue, &mac->set_multicast_hash_work);
+ zd_chip_set_multicast_hash(&mac->chip, &hash);
- if (changed_flags & FIF_CONTROL)
- queue_work(zd_workqueue, &mac->set_rx_filter_work);
+ if (changed_flags & FIF_CONTROL) {
+ r = set_rx_filter(mac);
+ if (r)
+ dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r);
+ }
/* no handling required for FIF_OTHER_BSS as we don't currently
* do BSSID filtering */
@@ -1016,20 +1197,9 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
* time. */
}
-static void set_rts_cts_work(struct work_struct *work)
+static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble)
{
- struct zd_mac *mac =
- container_of(work, struct zd_mac, set_rts_cts_work);
- unsigned long flags;
- unsigned int short_preamble;
-
mutex_lock(&mac->chip.mutex);
-
- spin_lock_irqsave(&mac->lock, flags);
- mac->updating_rts_rate = 0;
- short_preamble = mac->short_preamble;
- spin_unlock_irqrestore(&mac->lock, flags);
-
zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble);
mutex_unlock(&mac->chip.mutex);
}
@@ -1040,33 +1210,42 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
u32 changes)
{
struct zd_mac *mac = zd_hw_mac(hw);
- unsigned long flags;
int associated;
dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes);
if (mac->type == NL80211_IFTYPE_MESH_POINT ||
- mac->type == NL80211_IFTYPE_ADHOC) {
+ mac->type == NL80211_IFTYPE_ADHOC ||
+ mac->type == NL80211_IFTYPE_AP) {
associated = true;
if (changes & BSS_CHANGED_BEACON) {
struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
if (beacon) {
+ zd_chip_disable_hwint(&mac->chip);
zd_mac_config_beacon(hw, beacon);
+ zd_chip_enable_hwint(&mac->chip);
kfree_skb(beacon);
}
}
if (changes & BSS_CHANGED_BEACON_ENABLED) {
- u32 interval;
+ u16 interval = 0;
+ u8 period = 0;
- if (bss_conf->enable_beacon)
- interval = BCN_MODE_IBSS |
- bss_conf->beacon_int;
- else
- interval = 0;
+ if (bss_conf->enable_beacon) {
+ period = bss_conf->dtim_period;
+ interval = bss_conf->beacon_int;
+ }
- zd_set_beacon_interval(&mac->chip, interval);
+ spin_lock_irq(&mac->lock);
+ mac->beacon.period = period;
+ mac->beacon.interval = interval;
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+
+ zd_set_beacon_interval(&mac->chip, interval, period,
+ mac->type);
}
} else
associated = is_valid_ether_addr(bss_conf->bssid);
@@ -1078,15 +1257,11 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw,
/* TODO: do hardware bssid filtering */
if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- spin_lock_irqsave(&mac->lock, flags);
+ spin_lock_irq(&mac->lock);
mac->short_preamble = bss_conf->use_short_preamble;
- if (!mac->updating_rts_rate) {
- mac->updating_rts_rate = 1;
- /* FIXME: should disable TX here, until work has
- * completed and RTS_CTS reg is updated */
- queue_work(zd_workqueue, &mac->set_rts_cts_work);
- }
- spin_unlock_irqrestore(&mac->lock, flags);
+ spin_unlock_irq(&mac->lock);
+
+ set_rts_cts(mac, bss_conf->use_short_preamble);
}
}
@@ -1138,12 +1313,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SIGNAL_UNSPEC;
+ IEEE80211_HW_SIGNAL_UNSPEC |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
hw->max_signal = 100;
hw->queues = 1;
@@ -1160,15 +1337,82 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
zd_chip_init(&mac->chip, hw, intf);
housekeeping_init(mac);
- INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler);
- INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
- INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler);
+ beacon_init(mac);
INIT_WORK(&mac->process_intr, zd_process_intr);
SET_IEEE80211_DEV(hw, &intf->dev);
return hw;
}
+#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ)
+
+static void beacon_watchdog_handler(struct work_struct *work)
+{
+ struct zd_mac *mac =
+ container_of(work, struct zd_mac, beacon.watchdog_work.work);
+ struct sk_buff *beacon;
+ unsigned long timeout;
+ int interval, period;
+
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ goto rearm;
+ if (mac->type != NL80211_IFTYPE_AP || !mac->vif)
+ goto rearm;
+
+ spin_lock_irq(&mac->lock);
+ interval = mac->beacon.interval;
+ period = mac->beacon.period;
+ timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ;
+ spin_unlock_irq(&mac->lock);
+
+ if (interval > 0 && time_is_before_jiffies(timeout)) {
+ dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, "
+ "restarting. "
+ "(interval: %d, dtim: %d)\n",
+ interval, period);
+
+ zd_chip_disable_hwint(&mac->chip);
+
+ beacon = ieee80211_beacon_get(mac->hw, mac->vif);
+ if (beacon) {
+ zd_mac_config_beacon(mac->hw, beacon);
+ kfree_skb(beacon);
+ }
+
+ zd_set_beacon_interval(&mac->chip, interval, period, mac->type);
+
+ zd_chip_enable_hwint(&mac->chip);
+
+ spin_lock_irq(&mac->lock);
+ mac->beacon.last_update = jiffies;
+ spin_unlock_irq(&mac->lock);
+ }
+
+rearm:
+ queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+ BEACON_WATCHDOG_DELAY);
+}
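The stall test in the watchdog above leaves generous slack; a worked example (interval is in TU of 1.024 ms but is passed to msecs_to_jiffies as milliseconds, a slightly conservative approximation):

    /* With interval == 100 TU (~102.4 ms of real time), a stall is
     * declared only once jiffies passes
     *     last_update + msecs_to_jiffies(100) + HZ
     * i.e. after roughly 1.1 s of beacon silence, so one missed run of
     * the 1 Hz watchdog cannot force a spurious restart.
     */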
+
+static void beacon_init(struct zd_mac *mac)
+{
+ INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler);
+}
+
+static void beacon_enable(struct zd_mac *mac)
+{
+ dev_dbg_f(zd_mac_dev(mac), "\n");
+
+ mac->beacon.last_update = jiffies;
+ queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work,
+ BEACON_WATCHDOG_DELAY);
+}
+
+static void beacon_disable(struct zd_mac *mac)
+{
+ dev_dbg_f(zd_mac_dev(mac), "\n");
+ cancel_delayed_work_sync(&mac->beacon.watchdog_work);
+}
+
#define LINK_LED_WORK_DELAY HZ
static void link_led_handler(struct work_struct *work)
@@ -1179,6 +1423,9 @@ static void link_led_handler(struct work_struct *work)
int is_associated;
int r;
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ goto requeue;
+
spin_lock_irq(&mac->lock);
is_associated = mac->associated;
spin_unlock_irq(&mac->lock);
@@ -1188,6 +1435,7 @@ static void link_led_handler(struct work_struct *work)
if (r)
dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r);
+requeue:
queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work,
LINK_LED_WORK_DELAY);
}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index a6d86b9..f8c93c3 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -163,6 +163,17 @@ struct housekeeping {
struct delayed_work link_led_work;
};
+struct beacon {
+ struct delayed_work watchdog_work;
+ unsigned long last_update;
+ u16 interval;
+ u8 period;
+};
+
+enum zd_device_flags {
+ ZD_DEVICE_RUNNING,
+};
+
#define ZD_MAC_STATS_BUFFER_SIZE 16
#define ZD_MAC_MAX_ACK_WAITERS 50
@@ -172,17 +183,19 @@ struct zd_mac {
spinlock_t lock;
spinlock_t intr_lock;
struct ieee80211_hw *hw;
+ struct ieee80211_vif *vif;
struct housekeeping housekeeping;
- struct work_struct set_multicast_hash_work;
+ struct beacon beacon;
struct work_struct set_rts_cts_work;
- struct work_struct set_rx_filter_work;
struct work_struct process_intr;
struct zd_mc_hash multicast_hash;
u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
u8 regdomain;
u8 default_regdomain;
+ u8 channel;
int type;
int associated;
+ unsigned long flags;
struct sk_buff_head ack_wait_queue;
struct ieee80211_channel channels[14];
struct ieee80211_rate rates[12];
@@ -191,9 +204,6 @@ struct zd_mac {
/* Short preamble (used for RTS/CTS) */
unsigned int short_preamble:1;
- /* flags to indicate update in progress */
- unsigned int updating_rts_rate:1;
-
/* whether to pass frames with CRC errors to stack */
unsigned int pass_failed_fcs:1;
@@ -304,6 +314,10 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
void zd_mac_tx_failed(struct urb *urb);
void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
+int zd_op_start(struct ieee80211_hw *hw);
+void zd_op_stop(struct ieee80211_hw *hw);
+int zd_restore_settings(struct zd_mac *mac);
+
#ifdef DEBUG
void zd_dump_rx_status(const struct rx_status *status);
#else
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 06041cb..f6df366 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -377,8 +377,10 @@ static inline void handle_regs_int(struct urb *urb)
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
if (int_num == CR_INTERRUPT) {
struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
+ spin_lock(&mac->lock);
memcpy(&mac->intr_buffer, urb->transfer_buffer,
USB_MAX_EP_INT_BUFFER);
+ spin_unlock(&mac->lock);
schedule_work(&mac->process_intr);
} else if (intr->read_regs_enabled) {
intr->read_regs.length = len = urb->actual_length;
@@ -409,8 +411,10 @@ static void int_urb_complete(struct urb *urb)
case -ENOENT:
case -ECONNRESET:
case -EPIPE:
- goto kfree;
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
+ return;
default:
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
goto resubmit;
}
@@ -441,12 +445,11 @@ static void int_urb_complete(struct urb *urb)
resubmit:
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r) {
- dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb);
- goto kfree;
+ dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n",
+ urb, r);
+ /* TODO: add worker to reset intr->urb */
}
return;
-kfree:
- kfree(urb->transfer_buffer);
}
static inline int int_urb_interval(struct usb_device *udev)
@@ -477,9 +480,8 @@ static inline int usb_int_enabled(struct zd_usb *usb)
int zd_usb_enable_int(struct zd_usb *usb)
{
int r;
- struct usb_device *udev;
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
struct zd_usb_interrupt *intr = &usb->intr;
- void *transfer_buffer = NULL;
struct urb *urb;
dev_dbg_f(zd_usb_dev(usb), "\n");
@@ -500,20 +502,21 @@ int zd_usb_enable_int(struct zd_usb *usb)
intr->urb = urb;
spin_unlock_irq(&intr->lock);
- /* TODO: make it a DMA buffer */
r = -ENOMEM;
- transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL);
- if (!transfer_buffer) {
+ intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER,
+ GFP_KERNEL, &intr->buffer_dma);
+ if (!intr->buffer) {
dev_dbg_f(zd_usb_dev(usb),
"couldn't allocate transfer_buffer\n");
goto error_set_urb_null;
}
- udev = zd_usb_to_usbdev(usb);
usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN),
- transfer_buffer, USB_MAX_EP_INT_BUFFER,
+ intr->buffer, USB_MAX_EP_INT_BUFFER,
int_urb_complete, usb,
intr->interval);
+ urb->transfer_dma = intr->buffer_dma;
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb);
r = usb_submit_urb(urb, GFP_KERNEL);
@@ -525,7 +528,8 @@ int zd_usb_enable_int(struct zd_usb *usb)
return 0;
error:
- kfree(transfer_buffer);
+ usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+ intr->buffer, intr->buffer_dma);
error_set_urb_null:
spin_lock_irq(&intr->lock);
intr->urb = NULL;
@@ -539,8 +543,11 @@ out:
void zd_usb_disable_int(struct zd_usb *usb)
{
unsigned long flags;
+ struct usb_device *udev = zd_usb_to_usbdev(usb);
struct zd_usb_interrupt *intr = &usb->intr;
struct urb *urb;
+ void *buffer;
+ dma_addr_t buffer_dma;
spin_lock_irqsave(&intr->lock, flags);
urb = intr->urb;
@@ -549,11 +556,18 @@ void zd_usb_disable_int(struct zd_usb *usb)
return;
}
intr->urb = NULL;
+ buffer = intr->buffer;
+ buffer_dma = intr->buffer_dma;
+ intr->buffer = NULL;
spin_unlock_irqrestore(&intr->lock, flags);
usb_kill_urb(urb);
dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
usb_free_urb(urb);
+
+ if (buffer)
+ usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
+ buffer, buffer_dma);
}
static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
@@ -601,6 +615,7 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
static void rx_urb_complete(struct urb *urb)
{
+ int r;
struct zd_usb *usb;
struct zd_usb_rx *rx;
const u8 *buffer;
@@ -615,6 +630,7 @@ static void rx_urb_complete(struct urb *urb)
case -ENOENT:
case -ECONNRESET:
case -EPIPE:
+ dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
return;
default:
dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status);
@@ -626,6 +642,8 @@ static void rx_urb_complete(struct urb *urb)
usb = urb->context;
rx = &usb->rx;
+ zd_usb_reset_rx_idle_timer(usb);
+
if (length%rx->usb_packet_size > rx->usb_packet_size-4) {
/* If there is an old first fragment, we don't care. */
dev_dbg_f(urb_dev(urb), "*** first fragment ***\n");
@@ -654,7 +672,9 @@ static void rx_urb_complete(struct urb *urb)
}
resubmit:
- usb_submit_urb(urb, GFP_ATOMIC);
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ if (r)
+ dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r);
}
static struct urb *alloc_rx_urb(struct zd_usb *usb)
@@ -690,7 +710,7 @@ static void free_rx_urb(struct urb *urb)
usb_free_urb(urb);
}
-int zd_usb_enable_rx(struct zd_usb *usb)
+static int __zd_usb_enable_rx(struct zd_usb *usb)
{
int i, r;
struct zd_usb_rx *rx = &usb->rx;
@@ -742,7 +762,21 @@ error:
return r;
}
-void zd_usb_disable_rx(struct zd_usb *usb)
+int zd_usb_enable_rx(struct zd_usb *usb)
+{
+ int r;
+ struct zd_usb_rx *rx = &usb->rx;
+
+ mutex_lock(&rx->setup_mutex);
+ r = __zd_usb_enable_rx(usb);
+ mutex_unlock(&rx->setup_mutex);
+
+ zd_usb_reset_rx_idle_timer(usb);
+
+ return r;
+}
+
+static void __zd_usb_disable_rx(struct zd_usb *usb)
{
int i;
unsigned long flags;
@@ -769,6 +803,40 @@ void zd_usb_disable_rx(struct zd_usb *usb)
spin_unlock_irqrestore(&rx->lock, flags);
}
+void zd_usb_disable_rx(struct zd_usb *usb)
+{
+ struct zd_usb_rx *rx = &usb->rx;
+
+ mutex_lock(&rx->setup_mutex);
+ __zd_usb_disable_rx(usb);
+ mutex_unlock(&rx->setup_mutex);
+
+ cancel_delayed_work_sync(&rx->idle_work);
+}
+
+static void zd_usb_reset_rx(struct zd_usb *usb)
+{
+ bool do_reset;
+ struct zd_usb_rx *rx = &usb->rx;
+ unsigned long flags;
+
+ mutex_lock(&rx->setup_mutex);
+
+ spin_lock_irqsave(&rx->lock, flags);
+ do_reset = rx->urbs != NULL;
+ spin_unlock_irqrestore(&rx->lock, flags);
+
+ if (do_reset) {
+ __zd_usb_disable_rx(usb);
+ __zd_usb_enable_rx(usb);
+ }
+
+ mutex_unlock(&rx->setup_mutex);
+
+ if (do_reset)
+ zd_usb_reset_rx_idle_timer(usb);
+}
+
/**
* zd_usb_disable_tx - disable transmission
* @usb: the zd1211rw-private USB structure
@@ -779,19 +847,21 @@ void zd_usb_disable_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
unsigned long flags;
- struct list_head *pos, *n;
+
+ atomic_set(&tx->enabled, 0);
+
+ /* kill all submitted tx-urbs */
+ usb_kill_anchored_urbs(&tx->submitted);
spin_lock_irqsave(&tx->lock, flags);
- list_for_each_safe(pos, n, &tx->free_urb_list) {
- list_del(pos);
- usb_free_urb(list_entry(pos, struct urb, urb_list));
- }
- tx->enabled = 0;
+ WARN_ON(!skb_queue_empty(&tx->submitted_skbs));
+ WARN_ON(tx->submitted_urbs != 0);
tx->submitted_urbs = 0;
+ spin_unlock_irqrestore(&tx->lock, flags);
+
/* The stopped state is ignored, relying on ieee80211_wake_queues()
* in a potentially following zd_usb_enable_tx().
*/
- spin_unlock_irqrestore(&tx->lock, flags);
}
/**
@@ -807,63 +877,13 @@ void zd_usb_enable_tx(struct zd_usb *usb)
struct zd_usb_tx *tx = &usb->tx;
spin_lock_irqsave(&tx->lock, flags);
- tx->enabled = 1;
+ atomic_set(&tx->enabled, 1);
tx->submitted_urbs = 0;
ieee80211_wake_queues(zd_usb_to_hw(usb));
tx->stopped = 0;
spin_unlock_irqrestore(&tx->lock, flags);
}
-/**
- * alloc_tx_urb - provides an tx URB
- * @usb: a &struct zd_usb pointer
- *
- * Allocates a new URB. If possible takes the urb from the free list in
- * usb->tx.
- */
-static struct urb *alloc_tx_urb(struct zd_usb *usb)
-{
- struct zd_usb_tx *tx = &usb->tx;
- unsigned long flags;
- struct list_head *entry;
- struct urb *urb;
-
- spin_lock_irqsave(&tx->lock, flags);
- if (list_empty(&tx->free_urb_list)) {
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- goto out;
- }
- entry = tx->free_urb_list.next;
- list_del(entry);
- urb = list_entry(entry, struct urb, urb_list);
-out:
- spin_unlock_irqrestore(&tx->lock, flags);
- return urb;
-}
-
-/**
- * free_tx_urb - frees a used tx URB
- * @usb: a &struct zd_usb pointer
- * @urb: URB to be freed
- *
- * Frees the transmission URB, which means to put it on the free URB
- * list.
- */
-static void free_tx_urb(struct zd_usb *usb, struct urb *urb)
-{
- struct zd_usb_tx *tx = &usb->tx;
- unsigned long flags;
-
- spin_lock_irqsave(&tx->lock, flags);
- if (!tx->enabled) {
- usb_free_urb(urb);
- goto out;
- }
- list_add(&urb->urb_list, &tx->free_urb_list);
-out:
- spin_unlock_irqrestore(&tx->lock, flags);
-}
-
static void tx_dec_submitted_urbs(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
@@ -905,6 +925,16 @@ static void tx_urb_complete(struct urb *urb)
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct zd_usb *usb;
+ struct zd_usb_tx *tx;
+
+ skb = (struct sk_buff *)urb->context;
+ info = IEEE80211_SKB_CB(skb);
+ /*
+ * grab 'usb' pointer before handing off the skb (since
+ * it might be freed by zd_mac_tx_to_dev or mac80211)
+ */
+ usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+ tx = &usb->tx;
switch (urb->status) {
case 0:
@@ -922,20 +952,16 @@ static void tx_urb_complete(struct urb *urb)
goto resubmit;
}
free_urb:
- skb = (struct sk_buff *)urb->context;
- /*
- * grab 'usb' pointer before handing off the skb (since
- * it might be freed by zd_mac_tx_to_dev or mac80211)
- */
- info = IEEE80211_SKB_CB(skb);
- usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
+ skb_unlink(skb, &usb->tx.submitted_skbs);
zd_mac_tx_to_dev(skb, urb->status);
- free_tx_urb(usb, urb);
+ usb_free_urb(urb);
tx_dec_submitted_urbs(usb);
return;
resubmit:
+ usb_anchor_urb(urb, &tx->submitted);
r = usb_submit_urb(urb, GFP_ATOMIC);
if (r) {
+ usb_unanchor_urb(urb);
dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r);
goto free_urb;
}
@@ -956,10 +982,17 @@ resubmit:
int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
{
int r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct usb_device *udev = zd_usb_to_usbdev(usb);
struct urb *urb;
+ struct zd_usb_tx *tx = &usb->tx;
- urb = alloc_tx_urb(usb);
+ if (!atomic_read(&tx->enabled)) {
+ r = -ENOENT;
+ goto out;
+ }
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
r = -ENOMEM;
goto out;
@@ -968,17 +1001,118 @@ int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb)
usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT),
skb->data, skb->len, tx_urb_complete, skb);
+ info->rate_driver_data[1] = (void *)jiffies;
+ skb_queue_tail(&tx->submitted_skbs, skb);
+ usb_anchor_urb(urb, &tx->submitted);
+
r = usb_submit_urb(urb, GFP_ATOMIC);
- if (r)
+ if (r) {
+ dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r);
+ usb_unanchor_urb(urb);
+ skb_unlink(skb, &tx->submitted_skbs);
goto error;
+ }
tx_inc_submitted_urbs(usb);
return 0;
error:
- free_tx_urb(usb, urb);
+ usb_free_urb(urb);
out:
return r;
}
+static bool zd_tx_timeout(struct zd_usb *usb)
+{
+ struct zd_usb_tx *tx = &usb->tx;
+ struct sk_buff_head *q = &tx->submitted_skbs;
+ struct sk_buff *skb, *skbnext;
+ struct ieee80211_tx_info *info;
+ unsigned long flags, trans_start;
+ bool have_timedout = false;
+
+ spin_lock_irqsave(&q->lock, flags);
+ skb_queue_walk_safe(q, skb, skbnext) {
+ info = IEEE80211_SKB_CB(skb);
+ trans_start = (unsigned long)info->rate_driver_data[1];
+
+ if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) {
+ have_timedout = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ return have_timedout;
+}
+
+static void zd_tx_watchdog_handler(struct work_struct *work)
+{
+ struct zd_usb *usb =
+ container_of(work, struct zd_usb, tx.watchdog_work.work);
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled)
+ goto out;
+ if (!zd_tx_timeout(usb))
+ goto out;
+
+ /* TX halted, try reset */
+ dev_warn(zd_usb_dev(usb), "TX-stall detected, resetting device...");
+
+ usb_queue_reset_device(usb->intf);
+
+ /* reset will stop this worker, don't rearm */
+ return;
+out:
+ queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+ ZD_TX_WATCHDOG_INTERVAL);
+}
+
+void zd_tx_watchdog_enable(struct zd_usb *usb)
+{
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (!tx->watchdog_enabled) {
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+ queue_delayed_work(zd_workqueue, &tx->watchdog_work,
+ ZD_TX_WATCHDOG_INTERVAL);
+ tx->watchdog_enabled = 1;
+ }
+}
+
+void zd_tx_watchdog_disable(struct zd_usb *usb)
+{
+ struct zd_usb_tx *tx = &usb->tx;
+
+ if (tx->watchdog_enabled) {
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+ tx->watchdog_enabled = 0;
+ cancel_delayed_work_sync(&tx->watchdog_work);
+ }
+}
+
+static void zd_rx_idle_timer_handler(struct work_struct *work)
+{
+ struct zd_usb *usb =
+ container_of(work, struct zd_usb, rx.idle_work.work);
+ struct zd_mac *mac = zd_usb_to_mac(usb);
+
+ if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags))
+ return;
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ /* 30 seconds since last rx, reset rx */
+ zd_usb_reset_rx(usb);
+}
+
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
+{
+ struct zd_usb_rx *rx = &usb->rx;
+
+ cancel_delayed_work(&rx->idle_work);
+ queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+}
+
static inline void init_usb_interrupt(struct zd_usb *usb)
{
struct zd_usb_interrupt *intr = &usb->intr;
@@ -993,22 +1127,27 @@ static inline void init_usb_rx(struct zd_usb *usb)
{
struct zd_usb_rx *rx = &usb->rx;
spin_lock_init(&rx->lock);
+ mutex_init(&rx->setup_mutex);
if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) {
rx->usb_packet_size = 512;
} else {
rx->usb_packet_size = 64;
}
ZD_ASSERT(rx->fragment_length == 0);
+ INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
}
static inline void init_usb_tx(struct zd_usb *usb)
{
struct zd_usb_tx *tx = &usb->tx;
spin_lock_init(&tx->lock);
- tx->enabled = 0;
+ atomic_set(&tx->enabled, 0);
tx->stopped = 0;
- INIT_LIST_HEAD(&tx->free_urb_list);
+ skb_queue_head_init(&tx->submitted_skbs);
+ init_usb_anchor(&tx->submitted);
tx->submitted_urbs = 0;
+ tx->watchdog_enabled = 0;
+ INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler);
}
void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw,
@@ -1240,6 +1379,7 @@ static void disconnect(struct usb_interface *intf)
ieee80211_unregister_hw(hw);
/* Just in case something has gone wrong! */
+ zd_usb_disable_tx(usb);
zd_usb_disable_rx(usb);
zd_usb_disable_int(usb);
@@ -1255,11 +1395,92 @@ static void disconnect(struct usb_interface *intf)
dev_dbg(&intf->dev, "disconnected\n");
}
+static void zd_usb_resume(struct zd_usb *usb)
+{
+ struct zd_mac *mac = zd_usb_to_mac(usb);
+ int r;
+
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ r = zd_op_start(zd_usb_to_hw(usb));
+ if (r < 0) {
+ dev_warn(zd_usb_dev(usb), "Device resume failed "
+ "with error code %d. Retrying...\n", r);
+ if (usb->was_running)
+ set_bit(ZD_DEVICE_RUNNING, &mac->flags);
+ usb_queue_reset_device(usb->intf);
+ return;
+ }
+
+ if (mac->type != NL80211_IFTYPE_UNSPECIFIED) {
+ r = zd_restore_settings(mac);
+ if (r < 0) {
+ dev_dbg(zd_usb_dev(usb),
+ "failed to restore settings, %d\n", r);
+ return;
+ }
+ }
+}
+
+static void zd_usb_stop(struct zd_usb *usb)
+{
+ dev_dbg_f(zd_usb_dev(usb), "\n");
+
+ zd_op_stop(zd_usb_to_hw(usb));
+
+ zd_usb_disable_tx(usb);
+ zd_usb_disable_rx(usb);
+ zd_usb_disable_int(usb);
+
+ usb->initialized = 0;
+}
+
+static int pre_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct zd_mac *mac;
+ struct zd_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = zd_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags);
+
+ zd_usb_stop(usb);
+
+ mutex_lock(&mac->chip.mutex);
+ return 0;
+}
+
+static int post_reset(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct zd_mac *mac;
+ struct zd_usb *usb;
+
+ if (!hw || intf->condition != USB_INTERFACE_BOUND)
+ return 0;
+
+ mac = zd_hw_mac(hw);
+ usb = &mac->chip.usb;
+
+ mutex_unlock(&mac->chip.mutex);
+
+ if (usb->was_running)
+ zd_usb_resume(usb);
+ return 0;
+}
+
static struct usb_driver driver = {
.name = KBUILD_MODNAME,
.id_table = usb_ids,
.probe = probe,
.disconnect = disconnect,
+ .pre_reset = pre_reset,
+ .post_reset = post_reset,
};
struct workqueue_struct *zd_workqueue;
@@ -1393,15 +1614,20 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
return -EWOULDBLOCK;
}
if (!usb_int_enabled(usb)) {
- dev_dbg_f(zd_usb_dev(usb),
+ dev_dbg_f(zd_usb_dev(usb),
"error: usb interrupt not enabled\n");
return -EWOULDBLOCK;
}
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT *
+ sizeof(__le16) > sizeof(usb->req_buf));
+ BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) >
+ sizeof(usb->req_buf));
+
req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16);
- req = kmalloc(req_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ req = (void *)usb->req_buf;
+
req->id = cpu_to_le16(USB_REQ_READ_REGS);
for (i = 0; i < count; i++)
req->addr[i] = cpu_to_le16((u16)addresses[i]);
@@ -1409,7 +1635,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
udev = zd_usb_to_usbdev(usb);
prepare_read_regs_int(usb);
r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 1000 /* ms */);
+ req, req_len, &actual_req_len, 50 /* ms */);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error in usb_bulk_msg(). Error number %d\n", r);
@@ -1424,7 +1650,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
}
timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(50));
if (!timeout) {
disable_read_regs_int(usb);
dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
@@ -1434,7 +1660,6 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
r = get_results(usb, values, req, count);
error:
- kfree(req);
return r;
}
@@ -1460,11 +1685,17 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
return -EWOULDBLOCK;
}
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ BUILD_BUG_ON(sizeof(struct usb_req_write_regs) +
+ USB_MAX_IOWRITE16_COUNT * sizeof(struct reg_data) >
+ sizeof(usb->req_buf));
+ BUG_ON(sizeof(struct usb_req_write_regs) +
+ count * sizeof(struct reg_data) >
+ sizeof(usb->req_buf));
+
req_len = sizeof(struct usb_req_write_regs) +
count * sizeof(struct reg_data);
- req = kmalloc(req_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ req = (void *)usb->req_buf;
req->id = cpu_to_le16(USB_REQ_WRITE_REGS);
for (i = 0; i < count; i++) {
@@ -1475,7 +1706,7 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
udev = zd_usb_to_usbdev(usb);
r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 1000 /* ms */);
+ req, req_len, &actual_req_len, 50 /* ms */);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error in usb_bulk_msg(). Error number %d\n", r);
@@ -1492,7 +1723,6 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
/* FALL-THROUGH with r == 0 */
error:
- kfree(req);
return r;
}
@@ -1537,14 +1767,19 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error %d: Couldn't read CR203\n", r);
- goto out;
+ return r;
}
bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA);
+ ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex));
+ BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) +
+ USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) >
+ sizeof(usb->req_buf));
+ BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) >
+ sizeof(usb->req_buf));
+
req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16);
- req = kmalloc(req_len, GFP_KERNEL);
- if (!req)
- return -ENOMEM;
+ req = (void *)usb->req_buf;
req->id = cpu_to_le16(USB_REQ_WRITE_RF);
/* 1: 3683a, but not used in ZYDAS driver */
@@ -1560,7 +1795,7 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
udev = zd_usb_to_usbdev(usb);
r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
- req, req_len, &actual_req_len, 1000 /* ms */);
+ req, req_len, &actual_req_len, 50 /* ms */);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error in usb_bulk_msg(). Error number %d\n", r);
@@ -1576,6 +1811,5 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
/* FALL-THROUGH with r == 0 */
out:
- kfree(req);
return r;
}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 1b1655c..2d688f4 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -32,6 +32,10 @@
#define ZD_USB_TX_HIGH 5
#define ZD_USB_TX_LOW 2
+#define ZD_TX_TIMEOUT (HZ * 5)
+#define ZD_TX_WATCHDOG_INTERVAL round_jiffies_relative(HZ)
+#define ZD_RX_IDLE_INTERVAL round_jiffies_relative(30 * HZ)
+
enum devicetype {
DEVICE_ZD1211 = 0,
DEVICE_ZD1211B = 1,
@@ -162,6 +166,8 @@ struct zd_usb_interrupt {
struct read_regs_int read_regs;
spinlock_t lock;
struct urb *urb;
+ void *buffer;
+ dma_addr_t buffer_dma;
int interval;
u8 read_regs_enabled:1;
};
@@ -175,7 +181,9 @@ static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr)
struct zd_usb_rx {
spinlock_t lock;
- u8 fragment[2*USB_MAX_RX_SIZE];
+ struct mutex setup_mutex;
+ struct delayed_work idle_work;
+ u8 fragment[2 * USB_MAX_RX_SIZE];
unsigned int fragment_length;
unsigned int usb_packet_size;
struct urb **urbs;
@@ -184,19 +192,21 @@ struct zd_usb_rx {
/**
* struct zd_usb_tx - structure used for transmitting frames
+ * @enabled: atomic enabled flag, indicates whether tx is enabled
* @lock: lock for transmission
- * @free_urb_list: list of free URBs, contains all the URBs, which can be used
+ * @submitted: anchor for URBs sent to device
* @submitted_urbs: atomic integer counting the URBs that have been sent to
* the device but have not yet completed
- * @enabled: enabled flag, indicates whether tx is enabled
* @stopped: indicates whether higher level tx queues are stopped
*/
struct zd_usb_tx {
+ atomic_t enabled;
spinlock_t lock;
- struct list_head free_urb_list;
+ struct delayed_work watchdog_work;
+ struct sk_buff_head submitted_skbs;
+ struct usb_anchor submitted;
int submitted_urbs;
- int enabled;
- int stopped;
+ u8 stopped:1, watchdog_enabled:1;
};
/* Contains the usb parts. The structure doesn't require a lock because intf
@@ -207,7 +217,8 @@ struct zd_usb {
struct zd_usb_rx rx;
struct zd_usb_tx tx;
struct usb_interface *intf;
- u8 is_zd1211b:1, initialized:1;
+ u8 req_buf[64]; /* zd_usb_iowrite16v needs 62 bytes */
+ u8 is_zd1211b:1, initialized:1, was_running:1;
};
#define zd_usb_dev(usb) (&usb->intf->dev)
@@ -234,12 +245,17 @@ void zd_usb_clear(struct zd_usb *usb);
int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size);
+void zd_tx_watchdog_enable(struct zd_usb *usb);
+void zd_tx_watchdog_disable(struct zd_usb *usb);
+
int zd_usb_enable_int(struct zd_usb *usb);
void zd_usb_disable_int(struct zd_usb *usb);
int zd_usb_enable_rx(struct zd_usb *usb);
void zd_usb_disable_rx(struct zd_usb *usb);
+void zd_usb_reset_rx_idle_timer(struct zd_usb *usb);
+
void zd_usb_enable_tx(struct zd_usb *usb);
void zd_usb_disable_tx(struct zd_usb *usb);
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 359df04..9d339eb 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -103,6 +103,8 @@
#define AUDIT_BPRM_FCAPS 1321 /* Information about fcaps increasing perms */
#define AUDIT_CAPSET 1322 /* Record showing argument to sys_capset */
#define AUDIT_MMAP 1323 /* Record showing descriptor and flags in mmap */
+#define AUDIT_NETFILTER_PKT 1324 /* Packets traversing netfilter chains */
+#define AUDIT_NETFILTER_CFG 1325 /* Netfilter chain modifications */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
new file mode 100644
index 0000000..473771a
--- /dev/null
+++ b/include/linux/cpu_rmap.h
@@ -0,0 +1,73 @@
+/*
+ * cpu_rmap.h: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+/**
+ * struct cpu_rmap - CPU affinity reverse-map
+ * @size: Number of objects to be reverse-mapped
+ * @used: Number of objects added
+ * @obj: Pointer to array of object pointers
+ * @near: For each CPU, the index and distance to the nearest object,
+ * based on affinity masks
+ */
+struct cpu_rmap {
+ u16 size, used;
+ void **obj;
+ struct {
+ u16 index;
+ u16 dist;
+ } near[0];
+};
+#define CPU_RMAP_DIST_INF 0xffff
+
+extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+
+/**
+ * free_cpu_rmap - free CPU affinity reverse-map
+ * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
+ */
+static inline void free_cpu_rmap(struct cpu_rmap *rmap)
+{
+ kfree(rmap);
+}
+
+extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
+extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+ const struct cpumask *affinity);
+
+static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
+{
+ return rmap->near[cpu].index;
+}
+
+static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
+{
+ return rmap->obj[rmap->near[cpu].index];
+}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/**
+ * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
+ * @size: Number of objects to be mapped
+ *
+ * Must be called in process context.
+ */
+static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
+{
+ return alloc_cpu_rmap(size, GFP_KERNEL);
+}
+extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+
+extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
+
+#endif
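
A minimal driver-side sketch of the intended use (not part of the patch; it assumes CONFIG_RFS_ACCEL and CONFIG_GENERIC_HARDIRQS, and the example_priv structure, queue count, and IRQ array are hypothetical):

#include <linux/cpu_rmap.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

/* Hypothetical per-device state. */
struct example_priv {
	struct net_device *netdev;
	unsigned int num_rx_queues;
	int rx_irq[16];
};

/* Map each RX-queue IRQ so RFS can find the queue nearest a CPU. */
static int example_setup_cpu_rmap(struct example_priv *priv)
{
	unsigned int i;
	int err;

	priv->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->num_rx_queues);
	if (!priv->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_queues; i++) {
		err = irq_cpu_rmap_add(priv->netdev->rx_cpu_rmap,
				       priv->rx_irq[i]);
		if (err) {
			free_irq_cpu_rmap(priv->netdev->rx_cpu_rmap);
			priv->netdev->rx_cpu_rmap = NULL;
			return err;
		}
	}
	return 0;
}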
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index 66900e3..4c5b26e 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -25,6 +25,11 @@
/* IEEE 802.1Qaz std supported values */
#define IEEE_8021QAZ_MAX_TCS 8
+#define IEEE_8021QAZ_TSA_STRICT 0
+#define IEEE_8021QAZ_TSA_CB_SHAPER 1
+#define IEEE_8021QAZ_TSA_ETS 2
+#define IEEE_8021QAZ_TSA_VENDOR 255
+
/* This structure contains the IEEE 802.1Qaz ETS managed object
*
* @willing: willing bit in ETS configuration TLV
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 010e2d8..d638e85 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -279,8 +279,6 @@ enum dccp_state {
DCCP_MAX_STATES
};
-#define DCCP_STATE_MASK 0x1f
-
enum {
DCCPF_OPEN = TCPF_ESTABLISHED,
DCCPF_REQUESTING = TCPF_SYN_SENT,
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 1908929..54d776c 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -251,6 +251,7 @@ enum ethtool_stringset {
ETH_SS_STATS,
ETH_SS_PRIV_FLAGS,
ETH_SS_NTUPLE_FILTERS,
+ ETH_SS_FEATURES,
};
/* for passing string sets for data tagging */
@@ -523,6 +524,87 @@ struct ethtool_flash {
char data[ETHTOOL_FLASH_MAX_FILENAME];
};
+/* for returning and changing feature sets */
+
+/**
+ * struct ethtool_get_features_block - block with state of 32 features
+ * @available: mask of changeable features
+ * @requested: mask of features requested to be enabled if possible
+ * @active: mask of currently enabled features
+ * @never_changed: mask of features not changeable for any device
+ */
+struct ethtool_get_features_block {
+ __u32 available;
+ __u32 requested;
+ __u32 active;
+ __u32 never_changed;
+};
+
+/**
+ * struct ethtool_gfeatures - command to get state of device's features
+ * @cmd: command number = %ETHTOOL_GFEATURES
+ * @size: in: number of elements in the features[] array;
+ * out: number of elements in features[] needed to hold all features
+ * @features: state of features
+ */
+struct ethtool_gfeatures {
+ __u32 cmd;
+ __u32 size;
+ struct ethtool_get_features_block features[0];
+};
+
+/**
+ * struct ethtool_set_features_block - block with request for 32 features
+ * @valid: mask of features to be changed
+ * @requested: values of features to be changed
+ */
+struct ethtool_set_features_block {
+ __u32 valid;
+ __u32 requested;
+};
+
+/**
+ * struct ethtool_sfeatures - command to request change in device's features
+ * @cmd: command number = %ETHTOOL_SFEATURES
+ * @size: array size of the features[] array
+ * @features: feature change masks
+ */
+struct ethtool_sfeatures {
+ __u32 cmd;
+ __u32 size;
+ struct ethtool_set_features_block features[0];
+};
+
+/*
+ * %ETHTOOL_SFEATURES changes features present in features[].valid to the
+ * values of corresponding bits in features[].requested. Bits in .requested
+ * not set in .valid or not changeable are ignored.
+ *
+ * Returns %EINVAL when .valid contains undefined or never-changeable bits
+ * or size is not equal to the required number of features words (32-bit blocks).
+ * Returns >= 0 if the request was completed; bits set in the value mean:
+ * %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not
+ * changeable (not present in %ETHTOOL_GFEATURES' features[].available);
+ * those bits were ignored.
+ * %ETHTOOL_F_WISH - some or all changes requested were recorded but the
+ * resulting state of bits masked by .valid is not equal to .requested.
+ * Probably there are other device-specific constraints on some features
+ * in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered
+ * here as though ignored bits were cleared.
+ *
+ * The meaning of the bits in the masks is obtained via the %ETHTOOL_GSSET_INFO
+ * (the number of bits in the arrays is always a multiple of 32) and
+ * %ETHTOOL_GSTRINGS commands for the ETH_SS_FEATURES string set. The first
+ * entry in the table corresponds to the least significant bit of features[0].
+ * Empty strings mark undefined features.
+ */
+enum ethtool_sfeatures_retval_bits {
+ ETHTOOL_F_UNSUPPORTED__BIT,
+ ETHTOOL_F_WISH__BIT,
+};
+
+#define ETHTOOL_F_UNSUPPORTED (1 << ETHTOOL_F_UNSUPPORTED__BIT)
+#define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT)
+
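
A hedged userspace sketch of exercising the new %ETHTOOL_GFEATURES command through the usual SIOCETHTOOL ioctl (not part of the patch; a single 32-bit feature block and the interface name "eth0" are assumed for brevity, real code should size the array via %ETHTOOL_GSSET_INFO):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	/* One feature block covers feature bits 0..31. */
	struct {
		struct ethtool_gfeatures hdr;
		struct ethtool_get_features_block blocks[1];
	} gf;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&gf, 0, sizeof(gf));
	gf.hdr.cmd = ETHTOOL_GFEATURES;
	gf.hdr.size = 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&gf;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GFEATURES");
		close(fd);
		return 1;
	}

	printf("available 0x%08x requested 0x%08x active 0x%08x\n",
	       gf.blocks[0].available, gf.blocks[0].requested,
	       gf.blocks[0].active);
	close(fd);
	return 0;
}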
#ifdef __KERNEL__
#include <linux/rculist.h>
@@ -543,7 +625,6 @@ struct net_device;
/* Some generic methods drivers may use in their ethtool_ops */
u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_rx_csum(struct net_device *dev);
u32 ethtool_op_get_tx_csum(struct net_device *dev);
int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
@@ -744,6 +825,9 @@ struct ethtool_ops {
#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */
#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */
+#define ETHTOOL_GFEATURES 0x0000003a /* Get device offload settings */
+#define ETHTOOL_SFEATURES 0x0000003b /* Change device offload settings */
+
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
#define SPARC_ETH_SSET ETHTOOL_SSET
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 6485d2a..f4a2e6b 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -135,6 +135,7 @@ enum {
IFLA_VF_PORTS,
IFLA_PORT_SELF,
IFLA_AF_SPEC,
+ IFLA_GROUP, /* Group the device belongs to */
__IFLA_MAX
};
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ae8fdc5..5f81466 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -144,6 +144,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
struct in_ifaddr {
+ struct hlist_node hash;
struct in_ifaddr *ifa_next;
struct in_device *ifa_dev;
struct rcu_head rcu_head;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 55e0d42..63c5ad7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
@@ -240,6 +242,35 @@ extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq: Interrupt to which notification applies
+ * @kref: Reference count, for internal use
+ * @work: Work item, for internal use
+ * @notify: Function to be called on change. This will be
+ * called in process context.
+ * @release: Function to be called on release. This will be
+ * called in process context. Once registered, the
+ * structure must only be freed when this function is
+ * called or later.
+ */
+struct irq_affinity_notify {
+ unsigned int irq;
+ struct kref kref;
+ struct work_struct work;
+ void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+ void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+ flush_scheduled_work();
+}
+
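
A hedged sketch of how a driver might register for these notifications (not part of the patch; the example_* names are hypothetical):

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical per-IRQ context embedding the notifier. */
struct example_irq_ctx {
	struct irq_affinity_notify notify;
};

static void example_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	/* runs in process context; e.g. re-steer per-queue state here */
	pr_info("irq %u affinity changed\n", notify->irq);
}

static void example_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);

	/* safe to free the embedding structure only from here on */
	kfree(container_of(notify, struct example_irq_ctx, notify));
}

static int example_register_notifier(unsigned int irq)
{
	struct example_irq_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->notify.notify = example_affinity_notify;
	ctx->notify.release = example_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->notify);
}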
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -255,7 +286,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
static inline int irq_set_affinity_hint(unsigned int irq,
- const struct cpumask *m)
+ const struct cpumask *m)
{
return -EINVAL;
}
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index 5f43a3b..4deb383 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -89,6 +89,14 @@
#define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */
#define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */
+#define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \
+ IP_VS_CONN_F_NOOUTPUT | \
+ IP_VS_CONN_F_INACTIVE | \
+ IP_VS_CONN_F_SEQ_MASK | \
+ IP_VS_CONN_F_NO_CPORT | \
+ IP_VS_CONN_F_TEMPLATE \
+ )
+
/* Flags that are not sent to backup server start from bit 16 */
#define IP_VS_CONN_F_NFCT (1 << 16) /* use netfilter conntrack */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index c1a95b7..bfef56d 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -8,6 +8,7 @@
* For now it's included from <linux/irq.h>
*/
+struct irq_affinity_notify;
struct proc_dir_entry;
struct timer_rand_state;
/**
@@ -24,6 +25,7 @@ struct timer_rand_state;
* @last_unhandled: aging timer for unhandled count
* @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
+ * @affinity_notify: context for notification of affinity changes
* @pending_mask: pending rebalanced interrupts
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
@@ -70,6 +72,7 @@ struct irq_desc {
raw_spinlock_t lock;
#ifdef CONFIG_SMP
const struct cpumask *affinity_hint;
+ struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_var_t pending_mask;
#endif
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
new file mode 100644
index 0000000..dd8da34
--- /dev/null
+++ b/include/linux/micrel_phy.h
@@ -0,0 +1,16 @@
+#ifndef _MICREL_PHY_H
+#define _MICREL_PHY_H
+
+#define MICREL_PHY_ID_MASK 0x00fffff0
+
+#define PHY_ID_KSZ9021 0x00221611
+#define PHY_ID_KS8737 0x00221720
+#define PHY_ID_KS8041 0x00221510
+#define PHY_ID_KS8051 0x00221550
+/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
+#define PHY_ID_KS8001 0x0022161A
+
+/* struct phy_device dev_flags definitions */
+#define MICREL_PHY_50MHZ_CLK 0x00000001
+
+#endif /* _MICREL_PHY_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d971346..ffe56c1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -75,6 +75,9 @@ struct wireless_dev;
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
+/* Initial net device group. All devices belong to group 0 by default. */
+#define INIT_NETDEV_GROUP 0
+
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
@@ -551,14 +554,16 @@ struct rps_map {
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
/*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
- * tail pointer for that CPU's input queue at the time of last enqueue.
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
*/
struct rps_dev_flow {
u16 cpu;
- u16 fill;
+ u16 filter;
unsigned int last_qtail;
};
+#define RPS_NO_FILTER 0xffff
/*
* The rps_dev_flow_table structure contains a table of flow mappings.
@@ -608,6 +613,11 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
+#ifdef CONFIG_RFS_ACCEL
+extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+ u32 flow_id, u16 filter_id);
+#endif
+
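
A sketch of the expiry side of the contract (not part of the patch): a driver periodically asks the stack whether each installed hardware filter is still wanted before tearing it down. The filter table and its fields are hypothetical.

#ifdef CONFIG_RFS_ACCEL
#include <linux/netdevice.h>

/* Hypothetical driver-side record of one installed hardware filter. */
struct example_rfs_filter {
	u32 flow_id;	/* as passed to ndo_rx_flow_steer() */
	u16 filter_id;	/* as returned from ndo_rx_flow_steer() */
	u16 rxq;	/* target RX queue */
	bool installed;
};

static void example_expire_rfs_filters(struct net_device *dev,
				       struct example_rfs_filter *tbl,
				       unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].installed)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq, tbl[i].flow_id,
					tbl[i].filter_id)) {
			/* remove the filter from hardware here */
			tbl[i].installed = false;
		}
	}
}
#endif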
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
struct rps_map __rcu *rps_map;
@@ -643,6 +653,14 @@ struct xps_dev_maps {
(nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
+#define TC_MAX_QUEUE 16
+#define TC_BITMASK 15
+/* HW offloaded queuing disciplines txq count and offset maps */
+struct netdev_tc_txq {
+ u16 count;
+ u16 offset;
+};
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -753,6 +771,38 @@ struct xps_dev_maps {
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
+ * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
+ * Called to setup 'tc' number of traffic classes in the net device. This
+ * is always called from the stack with the rtnl lock held and netif tx
+ * queues stopped. This allows the netdevice to perform queue management
+ * safely.
+ *
+ * RFS acceleration.
+ * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
+ * u16 rxq_index, u32 flow_id);
+ * Set hardware filter for RFS. rxq_index is the target queue index;
+ * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
+ * Return the filter ID on success, or a negative error code.
+ *
+ * Slave management functions (for bridge, bonding, etc). User should
+ * call netdev_set_master() to set dev->master properly.
+ * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
+ * Called to make another netdev an underling.
+ *
+ * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
+ * Called to release previously enslaved netdev.
+ *
+ * Feature/offload setting functions.
+ * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ * Adjusts the requested feature flags according to device-specific
+ * constraints, and returns the resulting flags. Must not modify
+ * the device state.
+ *
+ * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ * Called to update device configuration to new features. Passed
+ * feature set might be less than what was returned by ndo_fix_features().
+ * Must return >0 or -errno if it changed dev->features itself.
+ *
*/
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
@@ -811,6 +861,7 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -825,6 +876,20 @@ struct net_device_ops {
int (*ndo_fcoe_get_wwn)(struct net_device *dev,
u64 *wwn, int type);
#endif
+#ifdef CONFIG_RFS_ACCEL
+ int (*ndo_rx_flow_steer)(struct net_device *dev,
+ const struct sk_buff *skb,
+ u16 rxq_index,
+ u32 flow_id);
+#endif
+ int (*ndo_add_slave)(struct net_device *dev,
+ struct net_device *slave_dev);
+ int (*ndo_del_slave)(struct net_device *dev,
+ struct net_device *slave_dev);
+ u32 (*ndo_fix_features)(struct net_device *dev,
+ u32 features);
+ int (*ndo_set_features)(struct net_device *dev,
+ u32 features);
};
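
A hedged sketch of a driver wiring the two new feature hooks described above (not part of the patch; the example_* callbacks and the scatter/gather-checksum dependency are illustrative):

#include <linux/netdevice.h>

static u32 example_fix_features(struct net_device *dev, u32 features)
{
	/* illustrative constraint: no TX checksum without scatter/gather */
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_CSUM;
	return features;
}

static int example_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* program the hardware RX-checksum enable bit here */
	}
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_fix_features = example_fix_features,
	.ndo_set_features = example_set_features,
};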
/*
@@ -876,8 +941,18 @@ struct net_device {
struct list_head napi_list;
struct list_head unreg_list;
- /* Net device features */
- unsigned long features;
+ /* currently active device features */
+ u32 features;
+ /* user-changeable features */
+ u32 hw_features;
+ /* user-requested features */
+ u32 wanted_features;
+ /* VLAN feature mask */
+ u32 vlan_features;
+
+ /* Net device feature bits; if you change something,
+ * also update netdev_features_strings[] in ethtool.c */
+
#define NETIF_F_SG 1 /* Scatter/gather IO. */
#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopback. */
@@ -902,6 +977,7 @@ struct net_device {
#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
+#define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */
/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT 16
@@ -913,6 +989,12 @@ struct net_device {
#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
+ /* Features valid for ethtool to change */
+ /* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE (NETIF_F_HIGHDMA | NETIF_F_VLAN_CHALLENGED | \
+ NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+#define NETIF_F_ETHTOOL_BITS (0x3f3fffff & ~NETIF_F_NEVER_CHANGE)
+
/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
NETIF_F_TSO6 | NETIF_F_UFO)
@@ -923,6 +1005,12 @@ struct net_device {
#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+#define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_TX_OFFLOADS (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)
+
/*
* If one device supports one of these features, then enable them
* for all in netdev_increment_features.
@@ -931,6 +1019,9 @@ struct net_device {
NETIF_F_SG | NETIF_F_HIGHDMA | \
NETIF_F_FRAGLIST)
+ /* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+
/* Interface index. Unique device identifier */
int ifindex;
int iflink;
@@ -1039,6 +1130,13 @@ struct net_device {
/* Number of RX queues currently active in device */
unsigned int real_num_rx_queues;
+
+#ifdef CONFIG_RFS_ACCEL
+ /* CPU reverse-mapping for RX completion interrupts, indexed
+ * by RX queue number. Assigned by driver. This must only be
+ * set if the ndo_rx_flow_steer operation is defined. */
+ struct cpu_rmap *rx_cpu_rmap;
+#endif
#endif
rx_handler_func_t __rcu *rx_handler;
@@ -1132,9 +1230,6 @@ struct net_device {
/* rtnetlink link ops */
const struct rtnl_link_ops *rtnl_link_ops;
- /* VLAN feature mask */
- unsigned long vlan_features;
-
/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536
unsigned int gso_max_size;
@@ -1143,6 +1238,9 @@ struct net_device {
/* Data Center Bridging netlink ops */
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
+ u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ u8 prio_tc_map[TC_BITMASK + 1];
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
/* max exchange id for FCoE LRO by ddp */
@@ -1153,12 +1251,66 @@ struct net_device {
/* phy device may attach itself for hardware timestamping */
struct phy_device *phydev;
+
+ /* group the device belongs to */
+ int group;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
#define NETDEV_ALIGN 32
static inline
+int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
+{
+ return dev->prio_tc_map[prio & TC_BITMASK];
+}
+
+static inline
+int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
+{
+ if (tc >= dev->num_tc)
+ return -EINVAL;
+
+ dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
+ return 0;
+}
+
+static inline
+void netdev_reset_tc(struct net_device *dev)
+{
+ dev->num_tc = 0;
+ memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
+ memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
+}
+
+static inline
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
+{
+ if (tc >= dev->num_tc)
+ return -EINVAL;
+
+ dev->tc_to_txq[tc].count = count;
+ dev->tc_to_txq[tc].offset = offset;
+ return 0;
+}
+
+static inline
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+ if (num_tc > TC_MAX_QUEUE)
+ return -EINVAL;
+
+ dev->num_tc = num_tc;
+ return 0;
+}
+
+static inline
+int netdev_get_num_tc(struct net_device *dev)
+{
+ return dev->num_tc;
+}
+
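
A sketch of the intended call sequence for these helpers (not part of the patch): a driver exposing four traffic classes over eight TX queues, two queues per class, with an identity priority map.

#include <linux/netdevice.h>

static int example_setup_tc(struct net_device *dev, u8 num_tc)
{
	u8 tc;
	int err;

	err = netdev_set_num_tc(dev, num_tc);	/* e.g. num_tc == 4 */
	if (err)
		return err;

	for (tc = 0; tc < num_tc; tc++) {
		/* two TX queues per class: offsets 0, 2, 4, 6 */
		err = netdev_set_tc_queue(dev, tc, 2, tc * 2);
		if (err)
			return err;
		/* illustrative: priority n maps straight to class n */
		netdev_set_prio_tc_map(dev, tc, tc);
	}
	return 0;
}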
+static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
@@ -1300,7 +1452,7 @@ struct packet_type {
struct packet_type *,
struct net_device *);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
+ u32 features);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
@@ -1345,7 +1497,7 @@ static inline struct net_device *next_net_device_rcu(struct net_device *dev)
struct net *net;
net = dev_net(dev);
- lh = rcu_dereference(dev->dev_list.next);
+ lh = rcu_dereference(list_next_rcu(&dev->dev_list));
return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}
@@ -1355,6 +1507,13 @@ static inline struct net_device *first_net_device(struct net *net)
net_device_entry(net->dev_base_head.next);
}
+static inline struct net_device *first_net_device_rcu(struct net *net)
+{
+ struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
+
+ return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
@@ -1844,6 +2003,7 @@ extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
+extern void dev_set_group(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
@@ -2267,8 +2427,10 @@ extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
+extern int netdev_set_bond_master(struct net_device *dev,
+ struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
@@ -2295,22 +2457,26 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l
extern void linkwatch_run_queue(void);
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
- unsigned long mask);
-unsigned long netdev_fix_features(unsigned long features, const char *name);
+static inline u32 netdev_get_wanted_features(struct net_device *dev)
+{
+ return (dev->features & ~dev->hw_features) | dev->wanted_features;
+}
+u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+u32 netdev_fix_features(struct net_device *dev, u32 features);
+void netdev_update_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
-int netif_skb_features(struct sk_buff *skb);
+u32 netif_skb_features(struct sk_buff *skb);
-static inline int net_gso_ok(int features, int gso_type)
+static inline int net_gso_ok(u32 features, int gso_type)
{
int feature = gso_type << NETIF_F_GSO_SHIFT;
return (features & feature) == feature;
}
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
@@ -2328,15 +2494,9 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
-extern int __skb_bond_should_drop(struct sk_buff *skb,
- struct net_device *master);
-
-static inline int skb_bond_should_drop(struct sk_buff *skb,
- struct net_device *master)
+static inline int netif_is_bond_slave(struct net_device *dev)
{
- if (master)
- return __skb_bond_should_drop(skb, master);
- return 0;
+ return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
}
extern struct pernet_operations __net_initdata loopback_net_ops;
@@ -2351,6 +2511,8 @@ static inline int dev_ethtool_get_settings(struct net_device *dev,
static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
+ if (dev->hw_features & NETIF_F_RXCSUM)
+ return !!(dev->features & NETIF_F_RXCSUM);
if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
return 0;
return dev->ethtool_ops->get_rx_csum(dev);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 1893837..eeec00a 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -24,16 +24,20 @@
#define NF_MAX_VERDICT NF_STOP
/* we overload the higher bits for encoding auxiliary data such as the queue
- * number. Not nice, but better than additional function arguments. */
-#define NF_VERDICT_MASK 0x0000ffff
-#define NF_VERDICT_BITS 16
+ * number or errno values. Not nice, but better than additional function
+ * arguments. */
+#define NF_VERDICT_MASK 0x000000ff
+
+/* extra verdict flags have mask 0x0000ff00 */
+#define NF_VERDICT_FLAG_QUEUE_BYPASS 0x00008000
+/* queue number (NF_QUEUE) or errno (NF_DROP) */
#define NF_VERDICT_QMASK 0xffff0000
#define NF_VERDICT_QBITS 16
-#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
+#define NF_QUEUE_NR(x) ((((x) << 16) & NF_VERDICT_QMASK) | NF_QUEUE)
-#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
+#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP)
/* only for userspace compatibility */
#ifndef __KERNEL__
@@ -41,6 +45,9 @@
<= 0x2000 is used for protocol-flags. */
#define NFC_UNKNOWN 0x4000
#define NFC_ALTERED 0x8000
+
+/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
+#define NF_VERDICT_BITS 16
#endif
enum nf_inet_hooks {
@@ -72,6 +79,10 @@ union nf_inet_addr {
#ifdef __KERNEL__
#ifdef CONFIG_NETFILTER
+static inline int NF_DROP_GETERR(int verdict)
+{
+ return -(verdict >> NF_VERDICT_QBITS);
+}
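
A worked example of the new packing (not part of the patch): the verdict proper now fits in the low byte, so the top 16 bits can carry a queue number or a negated errno.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>

static inline void example_verdict_packing(void)
{
	unsigned int v = NF_QUEUE_NR(3);	/* == (3 << 16) | NF_QUEUE */
	int d = NF_DROP_ERR(-ENOMEM);		/* drop carrying -ENOMEM */

	WARN_ON((v & NF_VERDICT_MASK) != NF_QUEUE);
	WARN_ON(((v & NF_VERDICT_QMASK) >> NF_VERDICT_QBITS) != 3);
	WARN_ON(NF_DROP_GETERR(d) != -ENOMEM);
}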
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
@@ -267,7 +278,7 @@ struct nf_afinfo {
int route_key_size;
};
-extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO];
+extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
return rcu_dereference(nf_afinfo[family]);
@@ -357,9 +368,9 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
-extern void (*nf_ct_destroy)(struct nf_conntrack *);
+extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 9d40eff..15e83bf 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -1,3 +1,5 @@
+header-y += ipset/
+
header-y += nf_conntrack_common.h
header-y += nf_conntrack_ftp.h
header-y += nf_conntrack_sctp.h
@@ -9,6 +11,7 @@ header-y += nfnetlink_conntrack.h
header-y += nfnetlink_log.h
header-y += nfnetlink_queue.h
header-y += x_tables.h
+header-y += xt_AUDIT.h
header-y += xt_CHECKSUM.h
header-y += xt_CLASSIFY.h
header-y += xt_CONNMARK.h
@@ -34,6 +37,7 @@ header-y += xt_connmark.h
header-y += xt_conntrack.h
header-y += xt_cpu.h
header-y += xt_dccp.h
+header-y += xt_devgroup.h
header-y += xt_dscp.h
header-y += xt_esp.h
header-y += xt_hashlimit.h
@@ -54,7 +58,9 @@ header-y += xt_quota.h
header-y += xt_rateest.h
header-y += xt_realm.h
header-y += xt_recent.h
+header-y += xt_set.h
header-y += xt_sctp.h
+header-y += xt_socket.h
header-y += xt_state.h
header-y += xt_statistic.h
header-y += xt_string.h
diff --git a/include/linux/netfilter/ipset/Kbuild b/include/linux/netfilter/ipset/Kbuild
new file mode 100644
index 0000000..601fe71
--- /dev/null
+++ b/include/linux/netfilter/ipset/Kbuild
@@ -0,0 +1,4 @@
+header-y += ip_set.h
+header-y += ip_set_bitmap.h
+header-y += ip_set_hash.h
+header-y += ip_set_list.h
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
new file mode 100644
index 0000000..ec333d8
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -0,0 +1,452 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The protocol version */
+#define IPSET_PROTOCOL 6
+
+/* The max length of strings including NUL: set and type identifiers */
+#define IPSET_MAXNAMELEN 32
+
+/* Message types and commands */
+enum ipset_cmd {
+ IPSET_CMD_NONE,
+ IPSET_CMD_PROTOCOL, /* 1: Return protocol version */
+ IPSET_CMD_CREATE, /* 2: Create a new (empty) set */
+ IPSET_CMD_DESTROY, /* 3: Destroy a (empty) set */
+ IPSET_CMD_FLUSH, /* 4: Remove all elements from a set */
+ IPSET_CMD_RENAME, /* 5: Rename a set */
+ IPSET_CMD_SWAP, /* 6: Swap two sets */
+ IPSET_CMD_LIST, /* 7: List sets */
+ IPSET_CMD_SAVE, /* 8: Save sets */
+ IPSET_CMD_ADD, /* 9: Add an element to a set */
+ IPSET_CMD_DEL, /* 10: Delete an element from a set */
+ IPSET_CMD_TEST, /* 11: Test an element in a set */
+ IPSET_CMD_HEADER, /* 12: Get set header data only */
+ IPSET_CMD_TYPE, /* 13: Get set type */
+ IPSET_MSG_MAX, /* Netlink message commands */
+
+ /* Commands in userspace: */
+ IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */
+ IPSET_CMD_HELP, /* 15: Get help */
+ IPSET_CMD_VERSION, /* 16: Get program version */
+ IPSET_CMD_QUIT, /* 17: Quit from interactive mode */
+
+ IPSET_CMD_MAX,
+
+ IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */
+};
+
+/* Attributes at command level */
+enum {
+ IPSET_ATTR_UNSPEC,
+ IPSET_ATTR_PROTOCOL, /* 1: Protocol version */
+ IPSET_ATTR_SETNAME, /* 2: Name of the set */
+ IPSET_ATTR_TYPENAME, /* 3: Typename */
+ IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */
+ IPSET_ATTR_REVISION, /* 4: Settype revision */
+ IPSET_ATTR_FAMILY, /* 5: Settype family */
+ IPSET_ATTR_FLAGS, /* 6: Flags at command level */
+ IPSET_ATTR_DATA, /* 7: Nested attributes */
+ IPSET_ATTR_ADT, /* 8: Multiple data containers */
+ IPSET_ATTR_LINENO, /* 9: Restore lineno */
+ IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */
+ IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */
+ __IPSET_ATTR_CMD_MAX,
+};
+#define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1)
+
+/* CADT specific attributes */
+enum {
+ IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1,
+ IPSET_ATTR_IP_FROM = IPSET_ATTR_IP,
+ IPSET_ATTR_IP_TO, /* 2 */
+ IPSET_ATTR_CIDR, /* 3 */
+ IPSET_ATTR_PORT, /* 4 */
+ IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT,
+ IPSET_ATTR_PORT_TO, /* 5 */
+ IPSET_ATTR_TIMEOUT, /* 6 */
+ IPSET_ATTR_PROTO, /* 7 */
+ IPSET_ATTR_CADT_FLAGS, /* 8 */
+ IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO, /* 9 */
+ /* Reserve empty slots */
+ IPSET_ATTR_CADT_MAX = 16,
+ /* Create-only specific attributes */
+ IPSET_ATTR_GC,
+ IPSET_ATTR_HASHSIZE,
+ IPSET_ATTR_MAXELEM,
+ IPSET_ATTR_NETMASK,
+ IPSET_ATTR_PROBES,
+ IPSET_ATTR_RESIZE,
+ IPSET_ATTR_SIZE,
+ /* Kernel-only */
+ IPSET_ATTR_ELEMENTS,
+ IPSET_ATTR_REFERENCES,
+ IPSET_ATTR_MEMSIZE,
+
+ __IPSET_ATTR_CREATE_MAX,
+};
+#define IPSET_ATTR_CREATE_MAX (__IPSET_ATTR_CREATE_MAX - 1)
+
+/* ADT specific attributes */
+enum {
+ IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1,
+ IPSET_ATTR_NAME,
+ IPSET_ATTR_NAMEREF,
+ IPSET_ATTR_IP2,
+ IPSET_ATTR_CIDR2,
+ __IPSET_ATTR_ADT_MAX,
+};
+#define IPSET_ATTR_ADT_MAX (__IPSET_ATTR_ADT_MAX - 1)
+
+/* IP specific attributes */
+enum {
+ IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1,
+ IPSET_ATTR_IPADDR_IPV6,
+ __IPSET_ATTR_IPADDR_MAX,
+};
+#define IPSET_ATTR_IPADDR_MAX (__IPSET_ATTR_IPADDR_MAX - 1)
+
+/* Error codes */
+enum ipset_errno {
+ IPSET_ERR_PRIVATE = 4096,
+ IPSET_ERR_PROTOCOL,
+ IPSET_ERR_FIND_TYPE,
+ IPSET_ERR_MAX_SETS,
+ IPSET_ERR_BUSY,
+ IPSET_ERR_EXIST_SETNAME2,
+ IPSET_ERR_TYPE_MISMATCH,
+ IPSET_ERR_EXIST,
+ IPSET_ERR_INVALID_CIDR,
+ IPSET_ERR_INVALID_NETMASK,
+ IPSET_ERR_INVALID_FAMILY,
+ IPSET_ERR_TIMEOUT,
+ IPSET_ERR_REFERENCED,
+ IPSET_ERR_IPADDR_IPV4,
+ IPSET_ERR_IPADDR_IPV6,
+
+ /* Type specific error codes */
+ IPSET_ERR_TYPE_SPECIFIC = 4352,
+};
+
+/* Flags at command level */
+enum ipset_cmd_flags {
+ IPSET_FLAG_BIT_EXIST = 0,
+ IPSET_FLAG_EXIST = (1 << IPSET_FLAG_BIT_EXIST),
+};
+
+/* Flags at CADT attribute level */
+enum ipset_cadt_flags {
+ IPSET_FLAG_BIT_BEFORE = 0,
+ IPSET_FLAG_BEFORE = (1 << IPSET_FLAG_BIT_BEFORE),
+};
+
+/* Commands with settype-specific attributes */
+enum ipset_adt {
+ IPSET_ADD,
+ IPSET_DEL,
+ IPSET_TEST,
+ IPSET_ADT_MAX,
+ IPSET_CREATE = IPSET_ADT_MAX,
+ IPSET_CADT_MAX,
+};
+
+#ifdef __KERNEL__
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+
+/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
+ * and IPSET_INVALID_ID if you want to increase the max number of sets.
+ */
+typedef u16 ip_set_id_t;
+
+#define IPSET_INVALID_ID 65535
+
+enum ip_set_dim {
+ IPSET_DIM_ZERO = 0,
+ IPSET_DIM_ONE,
+ IPSET_DIM_TWO,
+ IPSET_DIM_THREE,
+ /* Max dimension in elements.
+ * If changed, new revision of iptables match/target is required.
+ */
+ IPSET_DIM_MAX = 6,
+};
+
+/* Option flags for kernel operations */
+enum ip_set_kopt {
+ IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO),
+ IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE),
+ IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO),
+ IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
+};
+
+/* Set features */
+enum ip_set_feature {
+ IPSET_TYPE_IP_FLAG = 0,
+ IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
+ IPSET_TYPE_PORT_FLAG = 1,
+ IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
+ IPSET_TYPE_MAC_FLAG = 2,
+ IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
+ IPSET_TYPE_IP2_FLAG = 3,
+ IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
+ IPSET_TYPE_NAME_FLAG = 4,
+ IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
+ /* Strictly speaking not a feature, but a flag for dumping:
+ * this settype must be dumped last */
+ IPSET_DUMP_LAST_FLAG = 7,
+ IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
+};
+
+struct ip_set;
+
+typedef int (*ipset_adtfn)(struct ip_set *set, void *value, u32 timeout);
+
+/* Set type, variant-specific part */
+struct ip_set_type_variant {
+ /* Kernelspace: test/add/del entries
+ * returns negative error code,
+ * zero for no match/success to add/delete
+ * positive for matching element */
+ int (*kadt)(struct ip_set *set, const struct sk_buff * skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+
+ /* Userspace: test/add/del entries
+ * returns negative error code,
+ * zero for no match/success to add/delete
+ * positive for matching element */
+ int (*uadt)(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags);
+
+ /* Low level add/del/test functions */
+ ipset_adtfn adt[IPSET_ADT_MAX];
+
+ /* When adding entries and set is full, try to resize the set */
+ int (*resize)(struct ip_set *set, bool retried);
+ /* Destroy the set */
+ void (*destroy)(struct ip_set *set);
+ /* Flush the elements */
+ void (*flush)(struct ip_set *set);
+ /* Expire entries before listing */
+ void (*expire)(struct ip_set *set);
+ /* List set header data */
+ int (*head)(struct ip_set *set, struct sk_buff *skb);
+ /* List elements */
+ int (*list)(const struct ip_set *set, struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+ /* Return true if "b" set is the same as "a"
+ * according to the create set parameters */
+ bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+};
+
+/* The core set type structure */
+struct ip_set_type {
+ struct list_head list;
+
+ /* Typename */
+ char name[IPSET_MAXNAMELEN];
+ /* Protocol version */
+ u8 protocol;
+ /* Set features to control swapping */
+ u8 features;
+ /* Set type dimension */
+ u8 dimension;
+ /* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */
+ u8 family;
+ /* Type revision */
+ u8 revision;
+
+ /* Create set */
+ int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+
+ /* Attribute policies */
+ const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
+ const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];
+
+ /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+ struct module *me;
+};
+
+/* register and unregister set type */
+extern int ip_set_type_register(struct ip_set_type *set_type);
+extern void ip_set_type_unregister(struct ip_set_type *set_type);
+
+/* A generic IP set */
+struct ip_set {
+ /* The name of the set */
+ char name[IPSET_MAXNAMELEN];
+ /* Lock protecting the set data */
+ rwlock_t lock;
+ /* References to the set */
+ atomic_t ref;
+ /* The core set type */
+ struct ip_set_type *type;
+ /* The type variant doing the real job */
+ const struct ip_set_type_variant *variant;
+ /* The actual INET family of the set */
+ u8 family;
+ /* The type specific data */
+ void *data;
+};
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(ip_set_id_t index);
+extern const char *ip_set_name_byindex(ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
+extern void ip_set_nfnl_put(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
+extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags);
+extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags);
+extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags);
+
+/* Utility functions */
+extern void *ip_set_alloc(size_t size);
+extern void ip_set_free(void *members);
+extern int ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr);
+extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+
+static inline int
+ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
+{
+ __be32 ip;
+ int ret = ip_set_get_ipaddr4(nla, &ip);
+
+ if (ret)
+ return ret;
+ *ipaddr = ntohl(ip);
+ return 0;
+}
+
+/* Ignore IPSET_ERR_EXIST errors when the IPSET_FLAG_EXIST flag is set */
+static inline bool
+ip_set_eexist(int ret, u32 flags)
+{
+ return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
+}
+
+/* Check the NLA_F_NET_BYTEORDER flag */
+static inline bool
+ip_set_attr_netorder(struct nlattr *tb[], int type)
+{
+ return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+static inline bool
+ip_set_optattr_netorder(struct nlattr *tb[], int type)
+{
+ return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+/* Useful converters */
+static inline u32
+ip_set_get_h32(const struct nlattr *attr)
+{
+ return ntohl(nla_get_be32(attr));
+}
+
+static inline u16
+ip_set_get_h16(const struct nlattr *attr)
+{
+ return ntohs(nla_get_be16(attr));
+}
+
+#define ipset_nest_start(skb, attr) nla_nest_start(skb, (attr) | NLA_F_NESTED)
+#define ipset_nest_end(skb, start) nla_nest_end(skb, start)
+
+#define NLA_PUT_IPADDR4(skb, type, ipaddr) \
+do { \
+ struct nlattr *__nested = ipset_nest_start(skb, type); \
+ \
+ if (!__nested) \
+ goto nla_put_failure; \
+ NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); \
+ ipset_nest_end(skb, __nested); \
+} while (0)
+
+#define NLA_PUT_IPADDR6(skb, type, ipaddrptr) \
+do { \
+ struct nlattr *__nested = ipset_nest_start(skb, type); \
+ \
+ if (!__nested) \
+ goto nla_put_failure; \
+ NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6, \
+ sizeof(struct in6_addr), ipaddrptr); \
+ ipset_nest_end(skb, __nested); \
+} while (0)
+
+/* Get address from skbuff */
+static inline __be32
+ip4addr(const struct sk_buff *skb, bool src)
+{
+ return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
+{
+ *addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
+{
+ memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
+ sizeof(*addr));
+}
+
+/* Calculate the bytes required to store the inclusive range of a-b */
+static inline int
+bitmap_bytes(u32 a, u32 b)
+{
+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
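
For reference, the rounding above can be checked in isolation. A minimal userspace sketch (plain C, not part of the patch) of the same arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as bitmap_bytes() above: bytes needed to hold the
 * inclusive bit range a..b, rounded up to whole u32 words. */
static int bitmap_bytes(uint32_t a, uint32_t b)
{
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}

int main(void)
{
	printf("%d\n", bitmap_bytes(0, 15));    /* 16 bits -> 2 bytes -> 4 */
	printf("%d\n", bitmap_bytes(0, 65535)); /* 65536 bits -> 8192 */
	return 0;
}
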
+
+/* Interface to iptables/ip6tables */
+
+#define SO_IP_SET 83
+
+union ip_set_name_index {
+ char name[IPSET_MAXNAMELEN];
+ ip_set_id_t index;
+};
+
+#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
+struct ip_set_req_get_set {
+ unsigned op;
+ unsigned version;
+ union ip_set_name_index set;
+};
+
+#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
+/* Uses ip_set_req_get_set */
+
+#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
+struct ip_set_req_version {
+ unsigned op;
+ unsigned version;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /*_IP_SET_H */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
new file mode 100644
index 0000000..ec9d9be
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -0,0 +1,1074 @@
+#ifndef _IP_SET_AHASH_H
+#define _IP_SET_AHASH_H
+
+#include <linux/rcupdate.h>
+#include <linux/jhash.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+/* Hashing which uses arrays to resolve hash collisions. The hash table
+ * is resized (doubled) when searching becomes too long.
+ * Internally jhash is used with the assumption that the size of the
+ * stored data is a multiple of sizeof(u32). If storage supports timeout,
+ * the timeout field must be the last one in the data structure - that field
+ * is ignored when computing the hash key.
+ *
+ * Readers and resizing
+ *
+ * Resizing can be triggered by userspace commands only, and those
+ * are serialized by the nfnl mutex. During resizing the set is
+ * read-locked, so the only possible concurrent operations are
+ * the kernel-side readers. Those must be protected by proper RCU locking:
+ * the resizer publishes the new table with rcu_assign_pointer(), waits
+ * for the old readers with synchronize_rcu_bh(), and only then frees
+ * the old table (see type_pf_resize() below).
+ */
+
+/* Number of elements to store in an initial array block */
+#define AHASH_INIT_SIZE 4
+/* Max number of elements to store in an array block */
+#define AHASH_MAX_SIZE (3*4)
+
+/* A hash bucket */
+struct hbucket {
+ void *value; /* the array of the values */
+ u8 size; /* size of the array */
+ u8 pos; /* position of the first free entry */
+};
+
+/* The hash table: the table size is stored here to make resizing easy */
+struct htable {
+ u8 htable_bits; /* size of hash table == 2^htable_bits */
+ struct hbucket bucket[0]; /* hashtable buckets */
+};
+
+#define hbucket(h, i) &((h)->bucket[i])
+
+/* Book-keeping of the prefixes added to the set */
+struct ip_set_hash_nets {
+ u8 cidr; /* the different cidr values in the set */
+ u32 nets; /* number of elements per cidr */
+};
+
+/* The generic ip_set hash structure */
+struct ip_set_hash {
+ struct htable *table; /* the hash table */
+ u32 maxelem; /* max elements in the hash */
+ u32 elements; /* current number of elements */
+ u32 initval; /* random jhash init value */
+ u32 timeout; /* timeout value, if enabled */
+ struct timer_list gc; /* garbage collection when timeout enabled */
+#ifdef IP_SET_HASH_WITH_NETMASK
+ u8 netmask; /* netmask value for subnets to store */
+#endif
+#ifdef IP_SET_HASH_WITH_NETS
+ struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */
+#endif
+};
+
+/* Compute htable_bits from the user input parameter hashsize */
+static u8
+htable_bits(u32 hashsize)
+{
+ /* Assume that hashsize == 2^htable_bits */
+ u8 bits = fls(hashsize - 1);
+ if (jhash_size(bits) != hashsize)
+ /* Round up to the first 2^n value */
+ bits = fls(hashsize);
+
+ return bits;
+}
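
A quick way to see the rounding behaviour is to run the function in userspace. In the sketch below __builtin_clz() and a shift stand in for the kernel's fls() and jhash_size() helpers (an assumption for the demo, not kernel API):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the
 * highest set bit; fls(0) == 0. */
static int fls_u32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Mirror of htable_bits(): round hashsize up to a power of two and
 * return the exponent. In the kernel, jhash_size(bits) == 1U << bits. */
static uint8_t htable_bits(uint32_t hashsize)
{
	uint8_t bits = fls_u32(hashsize - 1);

	if ((1U << bits) != hashsize)
		bits = fls_u32(hashsize);
	return bits;
}

int main(void)
{
	printf("%u\n", htable_bits(1024)); /* 10: already a power of two */
	printf("%u\n", htable_bits(1025)); /* 11: rounded up to 2048 */
	return 0;
}
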
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+#define SET_HOST_MASK(family) ((family) == AF_INET ? 32 : 128)
+
+/* Network cidr size bookkeeping when the hash stores different-sized
+ * networks */
+static void
+add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+ u8 i;
+
+ ++h->nets[cidr-1].nets;
+
+ pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+ if (h->nets[cidr-1].nets > 1)
+ return;
+
+ /* New cidr size */
+ for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
+ /* Keep the array sorted by decreasing cidr, i.e. larger cidr first */
+ if (h->nets[i].cidr < cidr)
+ swap(h->nets[i].cidr, cidr);
+ }
+ if (i < host_mask)
+ h->nets[i].cidr = cidr;
+}
+
+static void
+del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+ u8 i;
+
+ --h->nets[cidr-1].nets;
+
+ pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+ if (h->nets[cidr-1].nets != 0)
+ return;
+
+ /* All entries with this cidr size are deleted, so clean up h->nets[] */
+ for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) {
+ if (h->nets[i].cidr == cidr)
+ h->nets[i].cidr = cidr = h->nets[i+1].cidr;
+ }
+ h->nets[i - 1].cidr = 0;
+}
+#endif
+
+/* Destroy the hashtable part of the set */
+static void
+ahash_destroy(struct htable *t)
+{
+ struct hbucket *n;
+ u32 i;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ if (n->size)
+ /* FIXME: use slab cache */
+ kfree(n->value);
+ }
+
+ ip_set_free(t);
+}
+
+/* Calculate the actual memory size of the set data */
+static size_t
+ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask)
+{
+ u32 i;
+ struct htable *t = h->table;
+ size_t memsize = sizeof(*h)
+ + sizeof(*t)
+#ifdef IP_SET_HASH_WITH_NETS
+ + sizeof(struct ip_set_hash_nets) * host_mask
+#endif
+ + jhash_size(t->htable_bits) * sizeof(struct hbucket);
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++)
+ memsize += t->bucket[i].size * dsize;
+
+ return memsize;
+}
+
+/* Flush a hash type of set: destroy all elements */
+static void
+ip_set_hash_flush(struct ip_set *set)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct hbucket *n;
+ u32 i;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ if (n->size) {
+ n->size = n->pos = 0;
+ /* FIXME: use slab cache */
+ kfree(n->value);
+ }
+ }
+#ifdef IP_SET_HASH_WITH_NETS
+ memset(h->nets, 0, sizeof(struct ip_set_hash_nets)
+ * SET_HOST_MASK(set->family));
+#endif
+ h->elements = 0;
+}
+
+/* Destroy a hash type of set */
+static void
+ip_set_hash_destroy(struct ip_set *set)
+{
+ struct ip_set_hash *h = set->data;
+
+ if (with_timeout(h->timeout))
+ del_timer_sync(&h->gc);
+
+ ahash_destroy(h->table);
+ kfree(h);
+
+ set->data = NULL;
+}
+
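+/* The hash key is a jhash over the element taken as an array of u32
+ * words. In the timeout variants the timeout field lives only in
+ * struct type_pf_telem, so it is automatically excluded from the key,
+ * as required by the header comment above. */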
+#define HKEY(data, initval, htable_bits) \
+(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \
+ & jhash_mask(htable_bits))
+
+#endif /* _IP_SET_AHASH_H */
+
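+/* The part below is left outside the include guard on purpose: a set
+ * type implementation defines TYPE and PF and includes this template
+ * section once per variant, and the #undef block at the very end
+ * clears the macros for the next inclusion. */
+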
+#define CONCAT(a, b, c) a##b##c
+#define TOKEN(a, b, c) CONCAT(a, b, c)
+
+/* Type/family dependent function prototypes */
+
+#define type_pf_data_equal TOKEN(TYPE, PF, _data_equal)
+#define type_pf_data_isnull TOKEN(TYPE, PF, _data_isnull)
+#define type_pf_data_copy TOKEN(TYPE, PF, _data_copy)
+#define type_pf_data_zero_out TOKEN(TYPE, PF, _data_zero_out)
+#define type_pf_data_netmask TOKEN(TYPE, PF, _data_netmask)
+#define type_pf_data_list TOKEN(TYPE, PF, _data_list)
+#define type_pf_data_tlist TOKEN(TYPE, PF, _data_tlist)
+
+#define type_pf_elem TOKEN(TYPE, PF, _elem)
+#define type_pf_telem TOKEN(TYPE, PF, _telem)
+#define type_pf_data_timeout TOKEN(TYPE, PF, _data_timeout)
+#define type_pf_data_expired TOKEN(TYPE, PF, _data_expired)
+#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set)
+
+#define type_pf_elem_add TOKEN(TYPE, PF, _elem_add)
+#define type_pf_add TOKEN(TYPE, PF, _add)
+#define type_pf_del TOKEN(TYPE, PF, _del)
+#define type_pf_test_cidrs TOKEN(TYPE, PF, _test_cidrs)
+#define type_pf_test TOKEN(TYPE, PF, _test)
+
+#define type_pf_elem_tadd TOKEN(TYPE, PF, _elem_tadd)
+#define type_pf_del_telem TOKEN(TYPE, PF, _ahash_del_telem)
+#define type_pf_expire TOKEN(TYPE, PF, _expire)
+#define type_pf_tadd TOKEN(TYPE, PF, _tadd)
+#define type_pf_tdel TOKEN(TYPE, PF, _tdel)
+#define type_pf_ttest_cidrs TOKEN(TYPE, PF, _ahash_ttest_cidrs)
+#define type_pf_ttest TOKEN(TYPE, PF, _ahash_ttest)
+
+#define type_pf_resize TOKEN(TYPE, PF, _resize)
+#define type_pf_tresize TOKEN(TYPE, PF, _tresize)
+#define type_pf_flush ip_set_hash_flush
+#define type_pf_destroy ip_set_hash_destroy
+#define type_pf_head TOKEN(TYPE, PF, _head)
+#define type_pf_list TOKEN(TYPE, PF, _list)
+#define type_pf_tlist TOKEN(TYPE, PF, _tlist)
+#define type_pf_same_set TOKEN(TYPE, PF, _same_set)
+#define type_pf_kadt TOKEN(TYPE, PF, _kadt)
+#define type_pf_uadt TOKEN(TYPE, PF, _uadt)
+#define type_pf_gc TOKEN(TYPE, PF, _gc)
+#define type_pf_gc_init TOKEN(TYPE, PF, _gc_init)
+#define type_pf_variant TOKEN(TYPE, PF, _variant)
+#define type_pf_tvariant TOKEN(TYPE, PF, _tvariant)
+
+/* Flavour without timeout */
+
+/* Get the ith element from the array block n */
+#define ahash_data(n, i) \
+ ((struct type_pf_elem *)((n)->value) + (i))
+
+/* Add an element to the hash table when resizing the set:
+ * we skip maintaining the internal counters. */
+static int
+type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value)
+{
+ if (n->pos >= n->size) {
+ void *tmp;
+
+ if (n->size >= AHASH_MAX_SIZE)
+ /* Trigger rehashing */
+ return -EAGAIN;
+
+ tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_elem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+ if (n->size) {
+ memcpy(tmp, n->value,
+ sizeof(struct type_pf_elem) * n->size);
+ kfree(n->value);
+ }
+ n->value = tmp;
+ n->size += AHASH_INIT_SIZE;
+ }
+ type_pf_data_copy(ahash_data(n, n->pos++), value);
+ return 0;
+}
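
The grow-in-steps policy of the bucket array is easy to model outside the kernel. A minimal sketch, assuming plain int elements in place of struct type_pf_elem and calloc/free in place of the GFP_ATOMIC allocations:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define AHASH_INIT_SIZE 4
#define AHASH_MAX_SIZE (3 * 4)

struct bucket {
	int *value;		/* the array of values */
	unsigned char size;	/* allocated slots */
	unsigned char pos;	/* first free slot */
};

/* Same shape as type_pf_elem_add(): append, growing the array in
 * AHASH_INIT_SIZE steps; past AHASH_MAX_SIZE, ask for a rehash. */
static int bucket_add(struct bucket *n, int value)
{
	if (n->pos >= n->size) {
		int *tmp;

		if (n->size >= AHASH_MAX_SIZE)
			return -1;	/* kernel: -EAGAIN, trigger resize */
		tmp = calloc(n->size + AHASH_INIT_SIZE, sizeof(*tmp));
		if (!tmp)
			return -2;	/* kernel: -ENOMEM */
		if (n->size) {
			memcpy(tmp, n->value, n->size * sizeof(*tmp));
			free(n->value);
		}
		n->value = tmp;
		n->size += AHASH_INIT_SIZE;
	}
	n->value[n->pos++] = value;
	return 0;
}

int main(void)
{
	struct bucket b = { NULL, 0, 0 };
	int i;

	for (i = 0; i < 14; i++) {
		int ret = bucket_add(&b, i);

		printf("add %2d -> %d (size %u, pos %u)\n",
		       i, ret, b.size, b.pos);
	}
	free(b.value);
	return 0;
}

The 13th insertion fails with the rehash code, which is exactly what makes type_pf_resize() double the table.
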
+
+/* Resize a hash: create a new hash table with double the hashsize
+ * and insert the elements into it. Repeat until we succeed or
+ * fail due to memory pressure. */
+static int
+type_pf_resize(struct ip_set *set, bool retried)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t, *orig = h->table;
+ u8 htable_bits = orig->htable_bits;
+ const struct type_pf_elem *data;
+ struct hbucket *n, *m;
+ u32 i, j;
+ int ret;
+
+retry:
+ ret = 0;
+ htable_bits++;
+ pr_debug("attempt to resize set %s from %u to %u, t %p\n",
+ set->name, orig->htable_bits, htable_bits, orig);
+ if (!htable_bits)
+ /* In case we have plenty of memory :-) */
+ return -IPSET_ERR_HASH_FULL;
+ t = ip_set_alloc(sizeof(*t)
+ + jhash_size(htable_bits) * sizeof(struct hbucket));
+ if (!t)
+ return -ENOMEM;
+ t->htable_bits = htable_bits;
+
+ read_lock_bh(&set->lock);
+ for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+ n = hbucket(orig, i);
+ for (j = 0; j < n->pos; j++) {
+ data = ahash_data(n, j);
+ m = hbucket(t, HKEY(data, h->initval, htable_bits));
+ ret = type_pf_elem_add(m, data);
+ if (ret < 0) {
+ read_unlock_bh(&set->lock);
+ ahash_destroy(t);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
+ }
+ }
+ }
+
+ rcu_assign_pointer(h->table, t);
+ read_unlock_bh(&set->lock);
+
+ /* Give time to other readers of the set */
+ synchronize_rcu_bh();
+
+ pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
+ orig->htable_bits, orig, t->htable_bits, t);
+ ahash_destroy(orig);
+
+ return 0;
+}
+
+/* Add an element to a hash and update the internal counters when it
+ * succeeds; otherwise report the proper error code. */
+static int
+type_pf_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ int i, ret = 0;
+ u32 key;
+
+ if (h->elements >= h->maxelem)
+ return -IPSET_ERR_HASH_FULL;
+
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
+ key = HKEY(value, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++)
+ if (type_pf_data_equal(ahash_data(n, i), d)) {
+ ret = -IPSET_ERR_EXIST;
+ goto out;
+ }
+
+ ret = type_pf_elem_add(n, value);
+ if (ret != 0)
+ goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ add_cidr(h, d->cidr, HOST_MASK);
+#endif
+ h->elements++;
+out:
+ rcu_read_unlock_bh();
+ return ret;
+}
+
+/* Delete an element from the hash: swap it with the last element
+ * and free up space if possible.
+ */
+static int
+type_pf_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ int i;
+ struct type_pf_elem *data;
+ u32 key;
+
+ key = HKEY(value, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ if (!type_pf_data_equal(data, d))
+ continue;
+ if (i != n->pos - 1)
+ /* Not last one */
+ type_pf_data_copy(data, ahash_data(n, n->pos - 1));
+
+ n->pos--;
+ h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, d->cidr, HOST_MASK);
+#endif
+ if (n->pos + AHASH_INIT_SIZE < n->size) {
+ void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_elem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return 0;
+ n->size -= AHASH_INIT_SIZE;
+ memcpy(tmp, n->value,
+ n->size * sizeof(struct type_pf_elem));
+ kfree(n->value);
+ n->value = tmp;
+ }
+ return 0;
+ }
+
+ return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+/* Special test function which takes into account the different network
+ * sizes added to the set */
+static int
+type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct hbucket *n;
+ const struct type_pf_elem *data;
+ int i, j = 0;
+ u32 key;
+ u8 host_mask = SET_HOST_MASK(set->family);
+
+ pr_debug("test by nets\n");
+ for (; j < host_mask && h->nets[j].cidr; j++) {
+ type_pf_data_netmask(d, h->nets[j].cidr);
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ if (type_pf_data_equal(data, d))
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+/* Test whether the element is present in the set */
+static int
+type_pf_test(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct type_pf_elem *d = value;
+ struct hbucket *n;
+ const struct type_pf_elem *data;
+ int i;
+ u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ /* If we test an IP address and not a network address,
+ * try all possible network sizes */
+ if (d->cidr == SET_HOST_MASK(set->family))
+ return type_pf_test_cidrs(set, d, timeout);
+#endif
+
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ if (type_pf_data_equal(data, d))
+ return 1;
+ }
+ return 0;
+}
+
+/* Reply to a HEADER request: fill out the header part of the set */
+static int
+type_pf_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct ip_set_hash *h = set->data;
+ struct nlattr *nested;
+ size_t memsize;
+
+ read_lock_bh(&set->lock);
+ memsize = ahash_memsize(h, with_timeout(h->timeout)
+ ? sizeof(struct type_pf_telem)
+ : sizeof(struct type_pf_elem),
+ set->family == AF_INET ? 32 : 128);
+ read_unlock_bh(&set->lock);
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
+ htonl(jhash_size(h->table->htable_bits)));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
+#ifdef IP_SET_HASH_WITH_NETMASK
+ if (h->netmask != HOST_MASK)
+ NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
+#endif
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
+ if (with_timeout(h->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+/* Reply to a LIST/SAVE request: dump the elements of the specified set */
+static int
+type_pf_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct ip_set_hash *h = set->data;
+ const struct htable *t = h->table;
+ struct nlattr *atd, *nested;
+ const struct hbucket *n;
+ const struct type_pf_elem *data;
+ u32 first = cb->args[2];
+ /* We assume that one hash bucket fits into one page */
+ void *incomplete;
+ int i;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ pr_debug("list hash set %s\n", set->name);
+ for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+ incomplete = skb_tail_pointer(skb);
+ n = hbucket(t, cb->args[2]);
+ pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_data(n, i);
+ pr_debug("list hash %lu hbucket %p i %u, data %p\n",
+ cb->args[2], n, i, data);
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (cb->args[2] == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ if (type_pf_data_list(skb, data))
+ goto nla_put_failure;
+ ipset_nest_end(skb, nested);
+ }
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, incomplete);
+ ipset_nest_end(skb, atd);
+ if (unlikely(first == cb->args[2])) {
+ pr_warning("Can't list set %s: one bucket does not fit into "
+ "a message. Please report it!\n", set->name);
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+type_pf_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+static int
+type_pf_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags);
+
+static const struct ip_set_type_variant type_pf_variant = {
+ .kadt = type_pf_kadt,
+ .uadt = type_pf_uadt,
+ .adt = {
+ [IPSET_ADD] = type_pf_add,
+ [IPSET_DEL] = type_pf_del,
+ [IPSET_TEST] = type_pf_test,
+ },
+ .destroy = type_pf_destroy,
+ .flush = type_pf_flush,
+ .head = type_pf_head,
+ .list = type_pf_list,
+ .resize = type_pf_resize,
+ .same_set = type_pf_same_set,
+};
+
+/* Flavour with timeout support */
+
+#define ahash_tdata(n, i) \
+ (struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i))
+
+static inline u32
+type_pf_data_timeout(const struct type_pf_elem *data)
+{
+ const struct type_pf_telem *tdata =
+ (const struct type_pf_telem *) data;
+
+ return tdata->timeout;
+}
+
+static inline bool
+type_pf_data_expired(const struct type_pf_elem *data)
+{
+ const struct type_pf_telem *tdata =
+ (const struct type_pf_telem *) data;
+
+ return ip_set_timeout_expired(tdata->timeout);
+}
+
+static inline void
+type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout)
+{
+ struct type_pf_telem *tdata = (struct type_pf_telem *) data;
+
+ tdata->timeout = ip_set_timeout_set(timeout);
+}
+
+static int
+type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
+ u32 timeout)
+{
+ struct type_pf_elem *data;
+
+ if (n->pos >= n->size) {
+ void *tmp;
+
+ if (n->size >= AHASH_MAX_SIZE)
+ /* Trigger rehashing */
+ return -EAGAIN;
+
+ tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_telem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return -ENOMEM;
+ if (n->size) {
+ memcpy(tmp, n->value,
+ sizeof(struct type_pf_telem) * n->size);
+ kfree(n->value);
+ }
+ n->value = tmp;
+ n->size += AHASH_INIT_SIZE;
+ }
+ data = ahash_tdata(n, n->pos++);
+ type_pf_data_copy(data, value);
+ type_pf_data_timeout_set(data, timeout);
+ return 0;
+}
+
+/* Delete expired elements from the hashtable */
+static void
+type_pf_expire(struct ip_set_hash *h)
+{
+ struct htable *t = h->table;
+ struct hbucket *n;
+ struct type_pf_elem *data;
+ u32 i;
+ int j;
+
+ for (i = 0; i < jhash_size(t->htable_bits); i++) {
+ n = hbucket(t, i);
+ for (j = 0; j < n->pos; j++) {
+ data = ahash_tdata(n, j);
+ if (type_pf_data_expired(data)) {
+ pr_debug("expired %u/%u\n", i, j);
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, data->cidr, HOST_MASK);
+#endif
+ if (j != n->pos - 1)
+ /* Not last one */
+ type_pf_data_copy(data,
+ ahash_tdata(n, n->pos - 1));
+ n->pos--;
+ h->elements--;
+ }
+ }
+ if (n->pos + AHASH_INIT_SIZE < n->size) {
+ void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_telem),
+ GFP_ATOMIC);
+ if (!tmp)
+ /* Still try to delete expired elements */
+ continue;
+ n->size -= AHASH_INIT_SIZE;
+ memcpy(tmp, n->value,
+ n->size * sizeof(struct type_pf_telem));
+ kfree(n->value);
+ n->value = tmp;
+ }
+ }
+}
+
+static int
+type_pf_tresize(struct ip_set *set, bool retried)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t, *orig = h->table;
+ u8 htable_bits = orig->htable_bits;
+ const struct type_pf_elem *data;
+ struct hbucket *n, *m;
+ u32 i, j;
+ int ret;
+
+ /* Try to clean up expired entries once */
+ if (!retried) {
+ i = h->elements;
+ write_lock_bh(&set->lock);
+ type_pf_expire(set->data);
+ write_unlock_bh(&set->lock);
+ if (h->elements < i)
+ return 0;
+ }
+
+retry:
+ ret = 0;
+ htable_bits++;
+ if (!htable_bits)
+ /* In case we have plenty of memory :-) */
+ return -IPSET_ERR_HASH_FULL;
+ t = ip_set_alloc(sizeof(*t)
+ + jhash_size(htable_bits) * sizeof(struct hbucket));
+ if (!t)
+ return -ENOMEM;
+ t->htable_bits = htable_bits;
+
+ read_lock_bh(&set->lock);
+ for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+ n = hbucket(orig, i);
+ for (j = 0; j < n->pos; j++) {
+ data = ahash_tdata(n, j);
+ m = hbucket(t, HKEY(data, h->initval, htable_bits));
+ ret = type_pf_elem_tadd(m, data,
+ type_pf_data_timeout(data));
+ if (ret < 0) {
+ read_unlock_bh(&set->lock);
+ ahash_destroy(t);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
+ }
+ }
+ }
+
+ rcu_assign_pointer(h->table, t);
+ read_unlock_bh(&set->lock);
+
+ /* Give time to other readers of the set */
+ synchronize_rcu_bh();
+
+ ahash_destroy(orig);
+
+ return 0;
+}
+
+static int
+type_pf_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ struct type_pf_elem *data;
+ int ret = 0, i, j = AHASH_MAX_SIZE + 1;
+ u32 key;
+
+ if (h->elements >= h->maxelem)
+ /* FIXME: when set is full, we slow down here */
+ type_pf_expire(h);
+ if (h->elements >= h->maxelem)
+ return -IPSET_ERR_HASH_FULL;
+
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (type_pf_data_equal(data, d)) {
+ if (type_pf_data_expired(data))
+ j = i;
+ else {
+ ret = -IPSET_ERR_EXIST;
+ goto out;
+ }
+ } else if (j == AHASH_MAX_SIZE + 1 &&
+ type_pf_data_expired(data))
+ j = i;
+ }
+ if (j != AHASH_MAX_SIZE + 1) {
+ data = ahash_tdata(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, data->cidr, HOST_MASK);
+ add_cidr(h, d->cidr, HOST_MASK);
+#endif
+ type_pf_data_copy(data, d);
+ type_pf_data_timeout_set(data, timeout);
+ goto out;
+ }
+ ret = type_pf_elem_tadd(n, d, timeout);
+ if (ret != 0)
+ goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ add_cidr(h, d->cidr, HOST_MASK);
+#endif
+ h->elements++;
+out:
+ rcu_read_unlock_bh();
+ return ret;
+}
+
+static int
+type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ const struct type_pf_elem *d = value;
+ struct hbucket *n;
+ int i, ret = 0;
+ struct type_pf_elem *data;
+ u32 key;
+
+ key = HKEY(value, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (!type_pf_data_equal(data, d))
+ continue;
+ if (type_pf_data_expired(data))
+ ret = -IPSET_ERR_EXIST;
+ if (i != n->pos - 1)
+ /* Not last one */
+ type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
+
+ n->pos--;
+ h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+ del_cidr(h, d->cidr, HOST_MASK);
+#endif
+ if (n->pos + AHASH_INIT_SIZE < n->size) {
+ void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+ * sizeof(struct type_pf_telem),
+ GFP_ATOMIC);
+ if (!tmp)
+ return 0;
+ n->size -= AHASH_INIT_SIZE;
+ memcpy(tmp, n->value,
+ n->size * sizeof(struct type_pf_telem));
+ kfree(n->value);
+ n->value = tmp;
+ }
+ return 0;
+ }
+
+ return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+static int
+type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct type_pf_elem *data;
+ struct hbucket *n;
+ int i, j = 0;
+ u32 key;
+ u8 host_mask = SET_HOST_MASK(set->family);
+
+ for (; j < host_mask && h->nets[j].cidr; j++) {
+ type_pf_data_netmask(d, h->nets[j].cidr);
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (type_pf_data_equal(data, d))
+ return !type_pf_data_expired(data);
+ }
+ }
+ return 0;
+}
+#endif
+
+static int
+type_pf_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ struct ip_set_hash *h = set->data;
+ struct htable *t = h->table;
+ struct type_pf_elem *data, *d = value;
+ struct hbucket *n;
+ int i;
+ u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+ if (d->cidr == SET_HOST_MASK(set->family))
+ return type_pf_ttest_cidrs(set, d, timeout);
+#endif
+ key = HKEY(d, h->initval, t->htable_bits);
+ n = hbucket(t, key);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ if (type_pf_data_equal(data, d))
+ return !type_pf_data_expired(data);
+ }
+ return 0;
+}
+
+static int
+type_pf_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct ip_set_hash *h = set->data;
+ const struct htable *t = h->table;
+ struct nlattr *atd, *nested;
+ const struct hbucket *n;
+ const struct type_pf_elem *data;
+ u32 first = cb->args[2];
+ /* We assume that one hash bucket fits into one page */
+ void *incomplete;
+ int i;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+ incomplete = skb_tail_pointer(skb);
+ n = hbucket(t, cb->args[2]);
+ for (i = 0; i < n->pos; i++) {
+ data = ahash_tdata(n, i);
+ pr_debug("list %p %u\n", n, i);
+ if (type_pf_data_expired(data))
+ continue;
+ pr_debug("do list %p %u\n", n, i);
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (cb->args[2] == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ if (type_pf_data_tlist(skb, data))
+ goto nla_put_failure;
+ ipset_nest_end(skb, nested);
+ }
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_trim(skb, incomplete);
+ ipset_nest_end(skb, atd);
+ if (unlikely(first == cb->args[2])) {
+ pr_warning("Can't list set %s: one bucket does not fit into "
+ "a message. Please report it!\n", set->name);
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static const struct ip_set_type_variant type_pf_tvariant = {
+ .kadt = type_pf_kadt,
+ .uadt = type_pf_uadt,
+ .adt = {
+ [IPSET_ADD] = type_pf_tadd,
+ [IPSET_DEL] = type_pf_tdel,
+ [IPSET_TEST] = type_pf_ttest,
+ },
+ .destroy = type_pf_destroy,
+ .flush = type_pf_flush,
+ .head = type_pf_head,
+ .list = type_pf_tlist,
+ .resize = type_pf_tresize,
+ .same_set = type_pf_same_set,
+};
+
+static void
+type_pf_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct ip_set_hash *h = set->data;
+
+ pr_debug("called\n");
+ write_lock_bh(&set->lock);
+ type_pf_expire(h);
+ write_unlock_bh(&set->lock);
+
+ h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+ add_timer(&h->gc);
+}
+
+static void
+type_pf_gc_init(struct ip_set *set)
+{
+ struct ip_set_hash *h = set->data;
+
+ init_timer(&h->gc);
+ h->gc.data = (unsigned long) set;
+ h->gc.function = type_pf_gc;
+ h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+ add_timer(&h->gc);
+ pr_debug("gc initialized, run in every %u\n",
+ IPSET_GC_PERIOD(h->timeout));
+}
+
+#undef type_pf_data_equal
+#undef type_pf_data_isnull
+#undef type_pf_data_copy
+#undef type_pf_data_zero_out
+#undef type_pf_data_list
+#undef type_pf_data_tlist
+
+#undef type_pf_elem
+#undef type_pf_telem
+#undef type_pf_data_timeout
+#undef type_pf_data_expired
+#undef type_pf_data_netmask
+#undef type_pf_data_timeout_set
+
+#undef type_pf_elem_add
+#undef type_pf_add
+#undef type_pf_del
+#undef type_pf_test_cidrs
+#undef type_pf_test
+
+#undef type_pf_elem_tadd
+#undef type_pf_expire
+#undef type_pf_tadd
+#undef type_pf_tdel
+#undef type_pf_ttest_cidrs
+#undef type_pf_ttest
+
+#undef type_pf_resize
+#undef type_pf_tresize
+#undef type_pf_flush
+#undef type_pf_destroy
+#undef type_pf_head
+#undef type_pf_list
+#undef type_pf_tlist
+#undef type_pf_same_set
+#undef type_pf_kadt
+#undef type_pf_uadt
+#undef type_pf_gc
+#undef type_pf_gc_init
+#undef type_pf_variant
+#undef type_pf_tvariant
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
new file mode 100644
index 0000000..61a9e87
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -0,0 +1,31 @@
+#ifndef __IP_SET_BITMAP_H
+#define __IP_SET_BITMAP_H
+
+/* Bitmap type specific error codes */
+enum {
+ /* The element is out of the range of the set */
+ IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC,
+ /* The range exceeds the size limit of the set type */
+ IPSET_ERR_BITMAP_RANGE_SIZE,
+};
+
+#ifdef __KERNEL__
+#define IPSET_BITMAP_MAX_RANGE 0x0000FFFF
+
+/* Common functions */
+
+static inline u32
+range_to_mask(u32 from, u32 to, u8 *bits)
+{
+ u32 mask = 0xFFFFFFFE;
+
+ *bits = 32;
+ while (--(*bits) > 0 && mask && (to & mask) != from)
+ mask <<= 1;
+
+ return mask;
+}
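
As a worked example, the loop above maps an aligned range onto its netmask and prefix length. A minimal userspace sketch (host-byte-order values assumed):

#include <stdio.h>
#include <stdint.h>

/* Mirror of range_to_mask(): find the netmask (and prefix length)
 * that maps the host-order range [from, to] onto one CIDR block. */
static uint32_t range_to_mask(uint32_t from, uint32_t to, uint8_t *bits)
{
	uint32_t mask = 0xFFFFFFFE;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;
	return mask;
}

int main(void)
{
	uint8_t bits;
	/* 192.168.0.0 - 192.168.0.255 in host byte order */
	uint32_t from = 0xC0A80000, to = 0xC0A800FF;
	uint32_t mask = range_to_mask(from, to, &bits);

	printf("mask 0x%08X -> /%u\n", mask, bits); /* 0xFFFFFF00 -> /24 */
	return 0;
}
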
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_BITMAP_H */
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
new file mode 100644
index 0000000..3882a81
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -0,0 +1,21 @@
+#ifndef _IP_SET_GETPORT_H
+#define _IP_SET_GETPORT_H
+
+extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto);
+#else
+static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ return false;
+}
+#endif
+
+extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
+ __be16 *port);
+
+#endif /*_IP_SET_GETPORT_H*/
diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h
new file mode 100644
index 0000000..b86f15c
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_hash.h
@@ -0,0 +1,26 @@
+#ifndef __IP_SET_HASH_H
+#define __IP_SET_HASH_H
+
+/* Hash type specific error codes */
+enum {
+ /* Hash is full */
+ IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC,
+ /* Null-valued element */
+ IPSET_ERR_HASH_ELEM,
+ /* Invalid protocol */
+ IPSET_ERR_INVALID_PROTO,
+ /* Protocol missing but must be specified */
+ IPSET_ERR_MISSING_PROTO,
+};
+
+#ifdef __KERNEL__
+
+#define IPSET_DEFAULT_HASHSIZE 1024
+#define IPSET_MIMINAL_HASHSIZE 64
+#define IPSET_DEFAULT_MAXELEM 65536
+#define IPSET_DEFAULT_PROBES 4
+#define IPSET_DEFAULT_RESIZE 100
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_HASH_H */
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
new file mode 100644
index 0000000..40a63f3
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -0,0 +1,27 @@
+#ifndef __IP_SET_LIST_H
+#define __IP_SET_LIST_H
+
+/* List type specific error codes */
+enum {
+ /* Set name to be added/deleted/tested does not exist. */
+ IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC,
+ /* Adding a set of type list:set is not permitted (would create a loop) */
+ IPSET_ERR_LOOP,
+ /* Missing reference set */
+ IPSET_ERR_BEFORE,
+ /* Reference set does not exist */
+ IPSET_ERR_NAMEREF,
+ /* Set is full */
+ IPSET_ERR_LIST_FULL,
+ /* Reference set is not added to the set */
+ IPSET_ERR_REF_EXIST,
+};
+
+#ifdef __KERNEL__
+
+#define IP_SET_LIST_DEFAULT_SIZE 8
+#define IP_SET_LIST_MIN_SIZE 4
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
new file mode 100644
index 0000000..9f30c5f
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -0,0 +1,127 @@
+#ifndef _IP_SET_TIMEOUT_H
+#define _IP_SET_TIMEOUT_H
+
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+/* How often should the gc be run by default */
+#define IPSET_GC_TIME (3 * 60)
+
+/* Timeout period depending on the timeout value of the given set:
+ * a third of the set timeout, capped at IPSET_GC_TIME and floored at
+ * one second (e.g. timeout 600 -> min(600/3, 180) = 180 seconds) */
+#define IPSET_GC_PERIOD(timeout) \
+ (((timeout)/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
+
+/* Marker for a set defined without timeout support; UINT_MAX is used
+ * because zero is a valid timeout value (a permanent element) */
+#define IPSET_NO_TIMEOUT UINT_MAX
+
+#define with_timeout(timeout) ((timeout) != IPSET_NO_TIMEOUT)
+
+static inline unsigned int
+ip_set_timeout_uget(struct nlattr *tb)
+{
+ unsigned int timeout = ip_set_get_h32(tb);
+
+ /* Userspace supplied TIMEOUT parameter: clamp the reserved sentinel value */
+ return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
+}
+
+#ifdef IP_SET_BITMAP_TIMEOUT
+
+/* Bitmap specific timeout constants and macros for the entries */
+
+/* Bitmap entry is unset */
+#define IPSET_ELEM_UNSET 0
+/* Bitmap entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT (UINT_MAX/2)
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+ return timeout != IPSET_ELEM_UNSET &&
+ (timeout == IPSET_ELEM_PERMANENT ||
+ time_after(timeout, jiffies));
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+ return timeout != IPSET_ELEM_UNSET &&
+ timeout != IPSET_ELEM_PERMANENT &&
+ time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+ unsigned long t;
+
+ if (!timeout)
+ return IPSET_ELEM_PERMANENT;
+
+ t = timeout * HZ + jiffies;
+ if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
+ /* Bingo! */
+ t++;
+
+ return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
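
The encoding round-trips. Below is a userspace model of the bitmap flavour, with jiffies faked as a plain counter and HZ assumed to be 100 for the demo:

#include <stdio.h>
#include <limits.h>

#define HZ 100			/* assumed tick rate for the demo */
#define IPSET_ELEM_UNSET 0
#define IPSET_ELEM_PERMANENT (UINT_MAX/2)

static unsigned long jiffies = 1000;	/* fake clock */

static unsigned long timeout_set(unsigned int timeout)
{
	unsigned long t;

	if (!timeout)
		return IPSET_ELEM_PERMANENT;
	t = timeout * HZ + jiffies;
	if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
		t++;	/* avoid colliding with the two sentinel values */
	return t;
}

static unsigned int timeout_get(unsigned long t)
{
	return t == IPSET_ELEM_PERMANENT ? 0 : (t - jiffies) / HZ;
}

int main(void)
{
	unsigned long t = timeout_set(30);

	printf("stored %lu, reads back as %u s\n", t, timeout_get(t)); /* 30 */
	printf("permanent reads back as %u\n", timeout_get(timeout_set(0)));
	return 0;
}
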
+
+#else
+
+/* Hash specific timeout constants and macros for the entries */
+
+/* Hash entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT 0
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ||
+ time_after(timeout, jiffies);
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+ return timeout != IPSET_ELEM_PERMANENT &&
+ time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+ unsigned long t;
+
+ if (!timeout)
+ return IPSET_ELEM_PERMANENT;
+
+ t = timeout * HZ + jiffies;
+ if (t == IPSET_ELEM_PERMANENT)
+ /* Bingo! :-) */
+ t++;
+
+ return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+ return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+#endif /* ! IP_SET_BITMAP_TIMEOUT */
+
+#endif /* __KERNEL__ */
+
+#endif /* _IP_SET_TIMEOUT_H */
diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h
new file mode 100644
index 0000000..0e1fb50
--- /dev/null
+++ b/include/linux/netfilter/ipset/pfxlen.h
@@ -0,0 +1,35 @@
+#ifndef _PFXLEN_H
+#define _PFXLEN_H
+
+#include <asm/byteorder.h>
+#include <linux/netfilter.h>
+
+/* Prefixlen maps, by Jan Engelhardt */
+extern const union nf_inet_addr ip_set_netmask_map[];
+extern const union nf_inet_addr ip_set_hostmask_map[];
+
+static inline __be32
+ip_set_netmask(u8 pfxlen)
+{
+ return ip_set_netmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_netmask6(u8 pfxlen)
+{
+ return &ip_set_netmask_map[pfxlen].ip6[0];
+}
+
+static inline u32
+ip_set_hostmask(u8 pfxlen)
+{
+ return (__force u32) ip_set_hostmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_hostmask6(u8 pfxlen)
+{
+ return &ip_set_hostmask_map[pfxlen].ip6[0];
+}
+
+#endif /*_PFXLEN_H */
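
The maps themselves are precomputed tables defined elsewhere in the patch set; for IPv4, the entry for a given prefix length is equivalent to the shift below, shown here as a standalone sketch (netmask4 is an illustrative helper, not kernel API):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Equivalent of the IPv4 half of ip_set_netmask_map[]: the netmask
 * for a given prefix length, in network byte order. */
static uint32_t netmask4(uint8_t pfxlen)
{
	return pfxlen ? htonl(~0U << (32 - pfxlen)) : 0;
}

int main(void)
{
	struct in_addr a = { .s_addr = netmask4(24) };

	printf("/24 -> %s\n", inet_ntoa(a)); /* 255.255.255.0 */
	return 0;
}
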
diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h
new file mode 100644
index 0000000..064bc63
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_snmp.h
@@ -0,0 +1,9 @@
+#ifndef _NF_CONNTRACK_SNMP_H
+#define _NF_CONNTRACK_SNMP_H
+
+extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+
+#endif /* _NF_CONNTRACK_SNMP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 361d6b5..2b11fc1 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -47,7 +47,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_QUEUE 3
#define NFNL_SUBSYS_ULOG 4
#define NFNL_SUBSYS_OSF 5
-#define NFNL_SUBSYS_COUNT 6
+#define NFNL_SUBSYS_IPSET 6
+#define NFNL_SUBSYS_COUNT 7
#ifdef __KERNEL__
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 19711e3..debf1ae 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -42,6 +42,7 @@ enum ctattr_type {
CTA_SECMARK, /* obsolete */
CTA_ZONE,
CTA_SECCTX,
+ CTA_TIMESTAMP,
__CTA_MAX
};
#define CTA_MAX (__CTA_MAX - 1)
@@ -127,6 +128,14 @@ enum ctattr_counters {
};
#define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
+enum ctattr_tstamp {
+ CTA_TIMESTAMP_UNSPEC,
+ CTA_TIMESTAMP_START,
+ CTA_TIMESTAMP_STOP,
+ __CTA_TIMESTAMP_MAX
+};
+#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
+
enum ctattr_nat {
CTA_NAT_UNSPEC,
CTA_NAT_MINIP,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 6712e71..3721952 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -611,8 +611,9 @@ struct _compat_xt_align {
extern void xt_compat_lock(u_int8_t af);
extern void xt_compat_unlock(u_int8_t af);
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
+extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
extern void xt_compat_flush_offsets(u_int8_t af);
+extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
extern int xt_compat_match_offset(const struct xt_match *match);
diff --git a/include/linux/netfilter/xt_AUDIT.h b/include/linux/netfilter/xt_AUDIT.h
new file mode 100644
index 0000000..38751d2
--- /dev/null
+++ b/include/linux/netfilter/xt_AUDIT.h
@@ -0,0 +1,30 @@
+/*
+ * Header file for iptables xt_AUDIT target
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _XT_AUDIT_TARGET_H
+#define _XT_AUDIT_TARGET_H
+
+#include <linux/types.h>
+
+enum {
+ XT_AUDIT_TYPE_ACCEPT = 0,
+ XT_AUDIT_TYPE_DROP,
+ XT_AUDIT_TYPE_REJECT,
+ __XT_AUDIT_TYPE_MAX,
+};
+
+#define XT_AUDIT_TYPE_MAX (__XT_AUDIT_TYPE_MAX - 1)
+
+struct xt_audit_info {
+ __u8 type; /* XT_AUDIT_TYPE_* */
+};
+
+#endif /* _XT_AUDIT_TARGET_H */
diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h
index 1b56410..b56e768 100644
--- a/include/linux/netfilter/xt_CT.h
+++ b/include/linux/netfilter/xt_CT.h
@@ -1,14 +1,16 @@
#ifndef _XT_CT_H
#define _XT_CT_H
+#include <linux/types.h>
+
#define XT_CT_NOTRACK 0x1
struct xt_ct_target_info {
- u_int16_t flags;
- u_int16_t zone;
- u_int32_t ct_events;
- u_int32_t exp_events;
- char helper[16];
+ __u16 flags;
+ __u16 zone;
+ __u32 ct_events;
+ __u32 exp_events;
+ char helper[16];
/* Used internally by the kernel */
struct nf_conn *ct __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_NFQUEUE.h b/include/linux/netfilter/xt_NFQUEUE.h
index 2584f4a..9eafdbb 100644
--- a/include/linux/netfilter/xt_NFQUEUE.h
+++ b/include/linux/netfilter/xt_NFQUEUE.h
@@ -20,4 +20,10 @@ struct xt_NFQ_info_v1 {
__u16 queues_total;
};
+struct xt_NFQ_info_v2 {
+ __u16 queuenum;
+ __u16 queues_total;
+ __u16 bypass;
+};
+
#endif /* _XT_NFQ_TARGET_H */
diff --git a/include/linux/netfilter/xt_TCPOPTSTRIP.h b/include/linux/netfilter/xt_TCPOPTSTRIP.h
index 2db5432..7157318 100644
--- a/include/linux/netfilter/xt_TCPOPTSTRIP.h
+++ b/include/linux/netfilter/xt_TCPOPTSTRIP.h
@@ -1,13 +1,15 @@
#ifndef _XT_TCPOPTSTRIP_H
#define _XT_TCPOPTSTRIP_H
+#include <linux/types.h>
+
#define tcpoptstrip_set_bit(bmap, idx) \
(bmap[(idx) >> 5] |= 1U << (idx & 31))
#define tcpoptstrip_test_bit(bmap, idx) \
(((1U << (idx & 31)) & bmap[(idx) >> 5]) != 0)
struct xt_tcpoptstrip_target_info {
- u_int32_t strip_bmap[8];
+ __u32 strip_bmap[8];
};
#endif /* _XT_TCPOPTSTRIP_H */
diff --git a/include/linux/netfilter/xt_TPROXY.h b/include/linux/netfilter/xt_TPROXY.h
index 3f3d693..902043c 100644
--- a/include/linux/netfilter/xt_TPROXY.h
+++ b/include/linux/netfilter/xt_TPROXY.h
@@ -1,19 +1,21 @@
#ifndef _XT_TPROXY_H
#define _XT_TPROXY_H
+#include <linux/types.h>
+
/* TPROXY target is capable of marking the packet to perform
* redirection. We can get rid of that whenever we get support for
* multiple targets in the same rule. */
struct xt_tproxy_target_info {
- u_int32_t mark_mask;
- u_int32_t mark_value;
+ __u32 mark_mask;
+ __u32 mark_value;
__be32 laddr;
__be16 lport;
};
struct xt_tproxy_target_info_v1 {
- u_int32_t mark_mask;
- u_int32_t mark_value;
+ __u32 mark_mask;
+ __u32 mark_value;
union nf_inet_addr laddr;
__be16 lport;
};
diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h
index 8866826..9b883c8 100644
--- a/include/linux/netfilter/xt_cluster.h
+++ b/include/linux/netfilter/xt_cluster.h
@@ -1,15 +1,17 @@
#ifndef _XT_CLUSTER_MATCH_H
#define _XT_CLUSTER_MATCH_H
+#include <linux/types.h>
+
enum xt_cluster_flags {
XT_CLUSTER_F_INV = (1 << 0)
};
struct xt_cluster_match_info {
- u_int32_t total_nodes;
- u_int32_t node_mask;
- u_int32_t hash_seed;
- u_int32_t flags;
+ __u32 total_nodes;
+ __u32 node_mask;
+ __u32 hash_seed;
+ __u32 flags;
};
#define XT_CLUSTER_NODES_MAX 32
diff --git a/include/linux/netfilter/xt_comment.h b/include/linux/netfilter/xt_comment.h
index eacfedc..0ea5e79 100644
--- a/include/linux/netfilter/xt_comment.h
+++ b/include/linux/netfilter/xt_comment.h
@@ -4,7 +4,7 @@
#define XT_MAX_COMMENT_LEN 256
struct xt_comment_info {
- unsigned char comment[XT_MAX_COMMENT_LEN];
+ char comment[XT_MAX_COMMENT_LEN];
};
#endif /* XT_COMMENT_H */
diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h
index 7e3284b..0ca66e9 100644
--- a/include/linux/netfilter/xt_connlimit.h
+++ b/include/linux/netfilter/xt_connlimit.h
@@ -1,8 +1,15 @@
#ifndef _XT_CONNLIMIT_H
#define _XT_CONNLIMIT_H
+#include <linux/types.h>
+
struct xt_connlimit_data;
+enum {
+ XT_CONNLIMIT_INVERT = 1 << 0,
+ XT_CONNLIMIT_DADDR = 1 << 1,
+};
+
struct xt_connlimit_info {
union {
union nf_inet_addr mask;
@@ -13,7 +20,14 @@ struct xt_connlimit_info {
};
#endif
};
- unsigned int limit, inverse;
+ unsigned int limit;
+ union {
+ /* revision 0 */
+ unsigned int inverse;
+
+ /* revision 1 */
+ __u32 flags;
+ };
/* Used internally by the kernel */
struct xt_connlimit_data *data __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 54f47a2..74b904d 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -58,4 +58,19 @@ struct xt_conntrack_mtinfo2 {
__u16 state_mask, status_mask;
};
+struct xt_conntrack_mtinfo3 {
+ union nf_inet_addr origsrc_addr, origsrc_mask;
+ union nf_inet_addr origdst_addr, origdst_mask;
+ union nf_inet_addr replsrc_addr, replsrc_mask;
+ union nf_inet_addr repldst_addr, repldst_mask;
+ __u32 expires_min, expires_max;
+ __u16 l4proto;
+ __u16 origsrc_port, origdst_port;
+ __u16 replsrc_port, repldst_port;
+ __u16 match_flags, invert_flags;
+ __u16 state_mask, status_mask;
+ __u16 origsrc_port_high, origdst_port_high;
+ __u16 replsrc_port_high, repldst_port_high;
+};
+
#endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_devgroup.h b/include/linux/netfilter/xt_devgroup.h
new file mode 100644
index 0000000..1babde0
--- /dev/null
+++ b/include/linux/netfilter/xt_devgroup.h
@@ -0,0 +1,21 @@
+#ifndef _XT_DEVGROUP_H
+#define _XT_DEVGROUP_H
+
+#include <linux/types.h>
+
+enum xt_devgroup_flags {
+ XT_DEVGROUP_MATCH_SRC = 0x1,
+ XT_DEVGROUP_INVERT_SRC = 0x2,
+ XT_DEVGROUP_MATCH_DST = 0x4,
+ XT_DEVGROUP_INVERT_DST = 0x8,
+};
+
+struct xt_devgroup_info {
+ __u32 flags;
+ __u32 src_group;
+ __u32 src_mask;
+ __u32 dst_group;
+ __u32 dst_mask;
+};
+
+#endif /* _XT_DEVGROUP_H */
diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h
index b0d28c6..ca6e03e 100644
--- a/include/linux/netfilter/xt_quota.h
+++ b/include/linux/netfilter/xt_quota.h
@@ -1,6 +1,8 @@
#ifndef _XT_QUOTA_H
#define _XT_QUOTA_H
+#include <linux/types.h>
+
enum xt_quota_flags {
XT_QUOTA_INVERT = 0x1,
};
@@ -9,9 +11,9 @@ enum xt_quota_flags {
struct xt_quota_priv;
struct xt_quota_info {
- u_int32_t flags;
- u_int32_t pad;
- aligned_u64 quota;
+ __u32 flags;
+ __u32 pad;
+ aligned_u64 quota;
/* Used internally by the kernel */
struct xt_quota_priv *master;
diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
new file mode 100644
index 0000000..081f1de
--- /dev/null
+++ b/include/linux/netfilter/xt_set.h
@@ -0,0 +1,56 @@
+#ifndef _XT_SET_H
+#define _XT_SET_H
+
+#include <linux/types.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+/*
+ * Option flags for kernel operations (xt_set_info_v0)
+ */
+#define IPSET_SRC 0x01 /* Source match/add */
+#define IPSET_DST 0x02 /* Destination match/add */
+#define IPSET_MATCH_INV 0x04 /* Inverse matching */
+
+struct xt_set_info_v0 {
+ ip_set_id_t index;
+ union {
+ __u32 flags[IPSET_DIM_MAX + 1];
+ struct {
+ __u32 __flags[IPSET_DIM_MAX];
+ __u8 dim;
+ __u8 flags;
+ } compat;
+ } u;
+};
+
+/* match and target infos */
+struct xt_set_info_match_v0 {
+ struct xt_set_info_v0 match_set;
+};
+
+struct xt_set_info_target_v0 {
+ struct xt_set_info_v0 add_set;
+ struct xt_set_info_v0 del_set;
+};
+
+/* Revision 1: current interface to netfilter/iptables */
+
+struct xt_set_info {
+ ip_set_id_t index;
+ __u8 dim;
+ __u8 flags;
+};
+
+/* match and target infos */
+struct xt_set_info_match {
+ struct xt_set_info match_set;
+};
+
+struct xt_set_info_target {
+ struct xt_set_info add_set;
+ struct xt_set_info del_set;
+};
+
+#endif /*_XT_SET_H*/
diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h
index 6f475b8..26d7217 100644
--- a/include/linux/netfilter/xt_socket.h
+++ b/include/linux/netfilter/xt_socket.h
@@ -1,6 +1,8 @@
#ifndef _XT_SOCKET_H
#define _XT_SOCKET_H
+#include <linux/types.h>
+
enum {
XT_SOCKET_TRANSPARENT = 1 << 0,
};
diff --git a/include/linux/netfilter/xt_time.h b/include/linux/netfilter/xt_time.h
index 14b6df4..7c37fac 100644
--- a/include/linux/netfilter/xt_time.h
+++ b/include/linux/netfilter/xt_time.h
@@ -1,14 +1,16 @@
#ifndef _XT_TIME_H
#define _XT_TIME_H 1
+#include <linux/types.h>
+
struct xt_time_info {
- u_int32_t date_start;
- u_int32_t date_stop;
- u_int32_t daytime_start;
- u_int32_t daytime_stop;
- u_int32_t monthdays_match;
- u_int8_t weekdays_match;
- u_int8_t flags;
+ __u32 date_start;
+ __u32 date_stop;
+ __u32 daytime_start;
+ __u32 daytime_stop;
+ __u32 monthdays_match;
+ __u8 weekdays_match;
+ __u8 flags;
};
enum {
diff --git a/include/linux/netfilter/xt_u32.h b/include/linux/netfilter/xt_u32.h
index 9947f56..04d1bfe 100644
--- a/include/linux/netfilter/xt_u32.h
+++ b/include/linux/netfilter/xt_u32.h
@@ -1,6 +1,8 @@
#ifndef _XT_U32_H
#define _XT_U32_H 1
+#include <linux/types.h>
+
enum xt_u32_ops {
XT_U32_AND,
XT_U32_LEFTSH,
@@ -9,13 +11,13 @@ enum xt_u32_ops {
};
struct xt_u32_location_element {
- u_int32_t number;
- u_int8_t nextop;
+ __u32 number;
+ __u8 nextop;
};
struct xt_u32_value_element {
- u_int32_t min;
- u_int32_t max;
+ __u32 min;
+ __u32 max;
};
/*
@@ -27,14 +29,14 @@ struct xt_u32_value_element {
struct xt_u32_test {
struct xt_u32_location_element location[XT_U32_MAXSIZE+1];
struct xt_u32_value_element value[XT_U32_MAXSIZE+1];
- u_int8_t nnums;
- u_int8_t nvalues;
+ __u8 nnums;
+ __u8 nvalues;
};
struct xt_u32 {
struct xt_u32_test tests[XT_U32_MAXSIZE+1];
- u_int8_t ntests;
- u_int8_t invert;
+ __u8 ntests;
+ __u8 invert;
};
#endif /* _XT_U32_H */
diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h
index c73ef0b..be5be15 100644
--- a/include/linux/netfilter_bridge/ebt_802_3.h
+++ b/include/linux/netfilter_bridge/ebt_802_3.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_802_3_H
#define __LINUX_BRIDGE_EBT_802_3_H
+#include <linux/types.h>
+
#define EBT_802_3_SAP 0x01
#define EBT_802_3_TYPE 0x02
@@ -24,24 +26,24 @@
/* ui has one byte ctrl, ni has two */
struct hdr_ui {
- uint8_t dsap;
- uint8_t ssap;
- uint8_t ctrl;
- uint8_t orig[3];
+ __u8 dsap;
+ __u8 ssap;
+ __u8 ctrl;
+ __u8 orig[3];
__be16 type;
};
struct hdr_ni {
- uint8_t dsap;
- uint8_t ssap;
+ __u8 dsap;
+ __u8 ssap;
__be16 ctrl;
- uint8_t orig[3];
+ __u8 orig[3];
__be16 type;
};
struct ebt_802_3_hdr {
- uint8_t daddr[6];
- uint8_t saddr[6];
+ __u8 daddr[6];
+ __u8 saddr[6];
__be16 len;
union {
struct hdr_ui ui;
@@ -59,10 +61,10 @@ static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
#endif
struct ebt_802_3_info {
- uint8_t sap;
+ __u8 sap;
__be16 type;
- uint8_t bitmask;
- uint8_t invflags;
+ __u8 bitmask;
+ __u8 invflags;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_among.h b/include/linux/netfilter_bridge/ebt_among.h
index 0009558..bd4e3ad 100644
--- a/include/linux/netfilter_bridge/ebt_among.h
+++ b/include/linux/netfilter_bridge/ebt_among.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_AMONG_H
#define __LINUX_BRIDGE_EBT_AMONG_H
+#include <linux/types.h>
+
#define EBT_AMONG_DST 0x01
#define EBT_AMONG_SRC 0x02
@@ -30,7 +32,7 @@
*/
struct ebt_mac_wormhash_tuple {
- uint32_t cmp[2];
+ __u32 cmp[2];
__be32 ip;
};
diff --git a/include/linux/netfilter_bridge/ebt_arp.h b/include/linux/netfilter_bridge/ebt_arp.h
index cbf4843..522f3e4 100644
--- a/include/linux/netfilter_bridge/ebt_arp.h
+++ b/include/linux/netfilter_bridge/ebt_arp.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_ARP_H
#define __LINUX_BRIDGE_EBT_ARP_H
+#include <linux/types.h>
+
#define EBT_ARP_OPCODE 0x01
#define EBT_ARP_HTYPE 0x02
#define EBT_ARP_PTYPE 0x04
@@ -27,8 +29,8 @@ struct ebt_arp_info
unsigned char smmsk[ETH_ALEN];
unsigned char dmaddr[ETH_ALEN];
unsigned char dmmsk[ETH_ALEN];
- uint8_t bitmask;
- uint8_t invflags;
+ __u8 bitmask;
+ __u8 invflags;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_ip.h b/include/linux/netfilter_bridge/ebt_ip.h
index 6a708fb..c4bbc41 100644
--- a/include/linux/netfilter_bridge/ebt_ip.h
+++ b/include/linux/netfilter_bridge/ebt_ip.h
@@ -15,6 +15,8 @@
#ifndef __LINUX_BRIDGE_EBT_IP_H
#define __LINUX_BRIDGE_EBT_IP_H
+#include <linux/types.h>
+
#define EBT_IP_SOURCE 0x01
#define EBT_IP_DEST 0x02
#define EBT_IP_TOS 0x04
@@ -31,12 +33,12 @@ struct ebt_ip_info {
__be32 daddr;
__be32 smsk;
__be32 dmsk;
- uint8_t tos;
- uint8_t protocol;
- uint8_t bitmask;
- uint8_t invflags;
- uint16_t sport[2];
- uint16_t dport[2];
+ __u8 tos;
+ __u8 protocol;
+ __u8 bitmask;
+ __u8 invflags;
+ __u16 sport[2];
+ __u16 dport[2];
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_ip6.h b/include/linux/netfilter_bridge/ebt_ip6.h
index e5de987..42b8896 100644
--- a/include/linux/netfilter_bridge/ebt_ip6.h
+++ b/include/linux/netfilter_bridge/ebt_ip6.h
@@ -12,14 +12,19 @@
#ifndef __LINUX_BRIDGE_EBT_IP6_H
#define __LINUX_BRIDGE_EBT_IP6_H
+#include <linux/types.h>
+
#define EBT_IP6_SOURCE 0x01
#define EBT_IP6_DEST 0x02
#define EBT_IP6_TCLASS 0x04
#define EBT_IP6_PROTO 0x08
#define EBT_IP6_SPORT 0x10
#define EBT_IP6_DPORT 0x20
+#define EBT_IP6_ICMP6 0x40
+
#define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\
- EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT)
+ EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT | \
+ EBT_IP6_ICMP6)
#define EBT_IP6_MATCH "ip6"
/* the same values are used for the invflags */
@@ -28,12 +33,18 @@ struct ebt_ip6_info {
struct in6_addr daddr;
struct in6_addr smsk;
struct in6_addr dmsk;
- uint8_t tclass;
- uint8_t protocol;
- uint8_t bitmask;
- uint8_t invflags;
- uint16_t sport[2];
- uint16_t dport[2];
+ __u8 tclass;
+ __u8 protocol;
+ __u8 bitmask;
+ __u8 invflags;
+ union {
+ __u16 sport[2];
+ __u8 icmpv6_type[2];
+ };
+ union {
+ __u16 dport[2];
+ __u8 icmpv6_code[2];
+ };
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_limit.h b/include/linux/netfilter_bridge/ebt_limit.h
index 4bf76b7..66d80b3 100644
--- a/include/linux/netfilter_bridge/ebt_limit.h
+++ b/include/linux/netfilter_bridge/ebt_limit.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_LIMIT_H
#define __LINUX_BRIDGE_EBT_LIMIT_H
+#include <linux/types.h>
+
#define EBT_LIMIT_MATCH "limit"
/* timings are in milliseconds. */
@@ -10,13 +12,13 @@
seconds, or one every 59 hours. */
struct ebt_limit_info {
- u_int32_t avg; /* Average secs between packets * scale */
- u_int32_t burst; /* Period multiplier for upper limit. */
+ __u32 avg; /* Average secs between packets * scale */
+ __u32 burst; /* Period multiplier for upper limit. */
/* Used internally by the kernel */
unsigned long prev;
- u_int32_t credit;
- u_int32_t credit_cap, cost;
+ __u32 credit;
+ __u32 credit_cap, cost;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_log.h b/include/linux/netfilter_bridge/ebt_log.h
index cc2cdfb..7e7f1d1 100644
--- a/include/linux/netfilter_bridge/ebt_log.h
+++ b/include/linux/netfilter_bridge/ebt_log.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_LOG_H
#define __LINUX_BRIDGE_EBT_LOG_H
+#include <linux/types.h>
+
#define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
#define EBT_LOG_ARP 0x02
#define EBT_LOG_NFLOG 0x04
@@ -10,9 +12,9 @@
#define EBT_LOG_WATCHER "log"
struct ebt_log_info {
- uint8_t loglevel;
- uint8_t prefix[EBT_LOG_PREFIX_SIZE];
- uint32_t bitmask;
+ __u8 loglevel;
+ __u8 prefix[EBT_LOG_PREFIX_SIZE];
+ __u32 bitmask;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_mark_m.h b/include/linux/netfilter_bridge/ebt_mark_m.h
index 9ceb10e..410f9e5 100644
--- a/include/linux/netfilter_bridge/ebt_mark_m.h
+++ b/include/linux/netfilter_bridge/ebt_mark_m.h
@@ -1,13 +1,15 @@
#ifndef __LINUX_BRIDGE_EBT_MARK_M_H
#define __LINUX_BRIDGE_EBT_MARK_M_H
+#include <linux/types.h>
+
#define EBT_MARK_AND 0x01
#define EBT_MARK_OR 0x02
#define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR)
struct ebt_mark_m_info {
unsigned long mark, mask;
- uint8_t invert;
- uint8_t bitmask;
+ __u8 invert;
+ __u8 bitmask;
};
#define EBT_MARK_MATCH "mark_m"
diff --git a/include/linux/netfilter_bridge/ebt_nflog.h b/include/linux/netfilter_bridge/ebt_nflog.h
index 0528178..df829fc 100644
--- a/include/linux/netfilter_bridge/ebt_nflog.h
+++ b/include/linux/netfilter_bridge/ebt_nflog.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_NFLOG_H
#define __LINUX_BRIDGE_EBT_NFLOG_H
+#include <linux/types.h>
+
#define EBT_NFLOG_MASK 0x0
#define EBT_NFLOG_PREFIX_SIZE 64
@@ -10,11 +12,11 @@
#define EBT_NFLOG_DEFAULT_THRESHOLD 1
struct ebt_nflog_info {
- u_int32_t len;
- u_int16_t group;
- u_int16_t threshold;
- u_int16_t flags;
- u_int16_t pad;
+ __u32 len;
+ __u16 group;
+ __u16 threshold;
+ __u16 flags;
+ __u16 pad;
char prefix[EBT_NFLOG_PREFIX_SIZE];
};
diff --git a/include/linux/netfilter_bridge/ebt_pkttype.h b/include/linux/netfilter_bridge/ebt_pkttype.h
index 51a7998..c241bad 100644
--- a/include/linux/netfilter_bridge/ebt_pkttype.h
+++ b/include/linux/netfilter_bridge/ebt_pkttype.h
@@ -1,9 +1,11 @@
#ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H
#define __LINUX_BRIDGE_EBT_PKTTYPE_H
+#include <linux/types.h>
+
struct ebt_pkttype_info {
- uint8_t pkt_type;
- uint8_t invert;
+ __u8 pkt_type;
+ __u8 invert;
};
#define EBT_PKTTYPE_MATCH "pkttype"
diff --git a/include/linux/netfilter_bridge/ebt_stp.h b/include/linux/netfilter_bridge/ebt_stp.h
index e503a0a..1025b9f 100644
--- a/include/linux/netfilter_bridge/ebt_stp.h
+++ b/include/linux/netfilter_bridge/ebt_stp.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_STP_H
#define __LINUX_BRIDGE_EBT_STP_H
+#include <linux/types.h>
+
#define EBT_STP_TYPE 0x0001
#define EBT_STP_FLAGS 0x0002
@@ -21,24 +23,24 @@
#define EBT_STP_MATCH "stp"
struct ebt_stp_config_info {
- uint8_t flags;
- uint16_t root_priol, root_priou;
+ __u8 flags;
+ __u16 root_priol, root_priou;
char root_addr[6], root_addrmsk[6];
- uint32_t root_costl, root_costu;
- uint16_t sender_priol, sender_priou;
+ __u32 root_costl, root_costu;
+ __u16 sender_priol, sender_priou;
char sender_addr[6], sender_addrmsk[6];
- uint16_t portl, portu;
- uint16_t msg_agel, msg_ageu;
- uint16_t max_agel, max_ageu;
- uint16_t hello_timel, hello_timeu;
- uint16_t forward_delayl, forward_delayu;
+ __u16 portl, portu;
+ __u16 msg_agel, msg_ageu;
+ __u16 max_agel, max_ageu;
+ __u16 hello_timel, hello_timeu;
+ __u16 forward_delayl, forward_delayu;
};
struct ebt_stp_info {
- uint8_t type;
+ __u8 type;
struct ebt_stp_config_info config;
- uint16_t bitmask;
- uint16_t invflags;
+ __u16 bitmask;
+ __u16 invflags;
};
#endif
diff --git a/include/linux/netfilter_bridge/ebt_ulog.h b/include/linux/netfilter_bridge/ebt_ulog.h
index b677e26..89a6bec 100644
--- a/include/linux/netfilter_bridge/ebt_ulog.h
+++ b/include/linux/netfilter_bridge/ebt_ulog.h
@@ -1,6 +1,8 @@
#ifndef _EBT_ULOG_H
#define _EBT_ULOG_H
+#include <linux/types.h>
+
#define EBT_ULOG_DEFAULT_NLGROUP 0
#define EBT_ULOG_DEFAULT_QTHRESHOLD 1
#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
@@ -10,7 +12,7 @@
#define EBT_ULOG_VERSION 1
struct ebt_ulog_info {
- uint32_t nlgroup;
+ __u32 nlgroup;
unsigned int cprange;
unsigned int qthreshold;
char prefix[EBT_ULOG_PREFIX_LEN];
diff --git a/include/linux/netfilter_bridge/ebt_vlan.h b/include/linux/netfilter_bridge/ebt_vlan.h
index 1d98be4..967d1d5 100644
--- a/include/linux/netfilter_bridge/ebt_vlan.h
+++ b/include/linux/netfilter_bridge/ebt_vlan.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_BRIDGE_EBT_VLAN_H
#define __LINUX_BRIDGE_EBT_VLAN_H
+#include <linux/types.h>
+
#define EBT_VLAN_ID 0x01
#define EBT_VLAN_PRIO 0x02
#define EBT_VLAN_ENCAP 0x04
@@ -8,12 +10,12 @@
#define EBT_VLAN_MATCH "vlan"
struct ebt_vlan_info {
- uint16_t id; /* VLAN ID {1-4095} */
- uint8_t prio; /* VLAN User Priority {0-7} */
+ __u16 id; /* VLAN ID {1-4095} */
+ __u8 prio; /* VLAN User Priority {0-7} */
__be16 encap; /* VLAN Encapsulated frame code {0-65535} */
- uint8_t bitmask; /* Args bitmask bit 1=1 - ID arg,
+ __u8 bitmask; /* Args bitmask bit 1=1 - ID arg,
bit 2=1 User-Priority arg, bit 3=1 encap*/
- uint8_t invflags; /* Inverse bitmask bit 1=1 - inversed ID arg,
+ __u8 invflags; /* Inverse bitmask bit 1=1 - inverted ID arg,
 bit 2=1 - inverted Priority arg */
};
diff --git a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
index e5a3687..c6a204c 100644
--- a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
+++ b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
@@ -1,6 +1,8 @@
#ifndef _IPT_CLUSTERIP_H_target
#define _IPT_CLUSTERIP_H_target
+#include <linux/types.h>
+
enum clusterip_hashmode {
CLUSTERIP_HASHMODE_SIP = 0,
CLUSTERIP_HASHMODE_SIP_SPT,
@@ -17,15 +19,15 @@ struct clusterip_config;
struct ipt_clusterip_tgt_info {
- u_int32_t flags;
+ __u32 flags;
/* only relevant for new ones */
- u_int8_t clustermac[6];
- u_int16_t num_total_nodes;
- u_int16_t num_local_nodes;
- u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
- u_int32_t hash_mode;
- u_int32_t hash_initval;
+ __u8 clustermac[6];
+ __u16 num_total_nodes;
+ __u16 num_local_nodes;
+ __u16 local_nodes[CLUSTERIP_MAX_NODES];
+ __u32 hash_mode;
+ __u32 hash_initval;
/* Used internally by the kernel */
struct clusterip_config *config;
diff --git a/include/linux/netfilter_ipv4/ipt_ECN.h b/include/linux/netfilter_ipv4/ipt_ECN.h
index 7ca4591..bb88d53 100644
--- a/include/linux/netfilter_ipv4/ipt_ECN.h
+++ b/include/linux/netfilter_ipv4/ipt_ECN.h
@@ -8,6 +8,8 @@
*/
#ifndef _IPT_ECN_TARGET_H
#define _IPT_ECN_TARGET_H
+
+#include <linux/types.h>
#include <linux/netfilter/xt_DSCP.h>
#define IPT_ECN_IP_MASK (~XT_DSCP_MASK)
@@ -19,11 +21,11 @@
#define IPT_ECN_OP_MASK 0xce
struct ipt_ECN_info {
- u_int8_t operation; /* bitset of operations */
- u_int8_t ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */
+ __u8 operation; /* bitset of operations */
+ __u8 ip_ect; /* ECT codepoint of IPv4 header, pre-shifted */
union {
struct {
- u_int8_t ece:1, cwr:1; /* TCP ECT bits */
+ __u8 ece:1, cwr:1; /* TCP ECT bits */
} tcp;
} proto;
};
diff --git a/include/linux/netfilter_ipv4/ipt_SAME.h b/include/linux/netfilter_ipv4/ipt_SAME.h
index 2529660..5bca782 100644
--- a/include/linux/netfilter_ipv4/ipt_SAME.h
+++ b/include/linux/netfilter_ipv4/ipt_SAME.h
@@ -1,15 +1,17 @@
#ifndef _IPT_SAME_H
#define _IPT_SAME_H
+#include <linux/types.h>
+
#define IPT_SAME_MAX_RANGE 10
#define IPT_SAME_NODST 0x01
struct ipt_same_info {
unsigned char info;
- u_int32_t rangesize;
- u_int32_t ipnum;
- u_int32_t *iparray;
+ __u32 rangesize;
+ __u32 ipnum;
+ __u32 *iparray;
/* hangs off end. */
struct nf_nat_range range[IPT_SAME_MAX_RANGE];
diff --git a/include/linux/netfilter_ipv4/ipt_TTL.h b/include/linux/netfilter_ipv4/ipt_TTL.h
index ee6611e..f6ac169 100644
--- a/include/linux/netfilter_ipv4/ipt_TTL.h
+++ b/include/linux/netfilter_ipv4/ipt_TTL.h
@@ -4,6 +4,8 @@
#ifndef _IPT_TTL_H
#define _IPT_TTL_H
+#include <linux/types.h>
+
enum {
IPT_TTL_SET = 0,
IPT_TTL_INC,
@@ -13,8 +15,8 @@ enum {
#define IPT_TTL_MAXMODE IPT_TTL_DEC
struct ipt_TTL_info {
- u_int8_t mode;
- u_int8_t ttl;
+ __u8 mode;
+ __u8 ttl;
};
diff --git a/include/linux/netfilter_ipv4/ipt_addrtype.h b/include/linux/netfilter_ipv4/ipt_addrtype.h
index 446de6a..0da4223 100644
--- a/include/linux/netfilter_ipv4/ipt_addrtype.h
+++ b/include/linux/netfilter_ipv4/ipt_addrtype.h
@@ -1,6 +1,8 @@
#ifndef _IPT_ADDRTYPE_H
#define _IPT_ADDRTYPE_H
+#include <linux/types.h>
+
enum {
IPT_ADDRTYPE_INVERT_SOURCE = 0x0001,
IPT_ADDRTYPE_INVERT_DEST = 0x0002,
@@ -9,17 +11,17 @@ enum {
};
struct ipt_addrtype_info_v1 {
- u_int16_t source; /* source-type mask */
- u_int16_t dest; /* dest-type mask */
- u_int32_t flags;
+ __u16 source; /* source-type mask */
+ __u16 dest; /* dest-type mask */
+ __u32 flags;
};
/* revision 0 */
struct ipt_addrtype_info {
- u_int16_t source; /* source-type mask */
- u_int16_t dest; /* dest-type mask */
- u_int32_t invert_source;
- u_int32_t invert_dest;
+ __u16 source; /* source-type mask */
+ __u16 dest; /* dest-type mask */
+ __u32 invert_source;
+ __u32 invert_dest;
};
#endif
diff --git a/include/linux/netfilter_ipv4/ipt_ah.h b/include/linux/netfilter_ipv4/ipt_ah.h
index 2e555b4..4e02bb0 100644
--- a/include/linux/netfilter_ipv4/ipt_ah.h
+++ b/include/linux/netfilter_ipv4/ipt_ah.h
@@ -1,9 +1,11 @@
#ifndef _IPT_AH_H
#define _IPT_AH_H
+#include <linux/types.h>
+
struct ipt_ah {
- u_int32_t spis[2]; /* Security Parameter Index */
- u_int8_t invflags; /* Inverse flags */
+ __u32 spis[2]; /* Security Parameter Index */
+ __u8 invflags; /* Inverse flags */
};
diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h
index 9945baa..eabf95f 100644
--- a/include/linux/netfilter_ipv4/ipt_ecn.h
+++ b/include/linux/netfilter_ipv4/ipt_ecn.h
@@ -8,6 +8,8 @@
*/
#ifndef _IPT_ECN_H
#define _IPT_ECN_H
+
+#include <linux/types.h>
#include <linux/netfilter/xt_dscp.h>
#define IPT_ECN_IP_MASK (~XT_DSCP_MASK)
@@ -20,12 +22,12 @@
/* match info */
struct ipt_ecn_info {
- u_int8_t operation;
- u_int8_t invert;
- u_int8_t ip_ect;
+ __u8 operation;
+ __u8 invert;
+ __u8 ip_ect;
union {
struct {
- u_int8_t ect;
+ __u8 ect;
} tcp;
} proto;
};
diff --git a/include/linux/netfilter_ipv4/ipt_ttl.h b/include/linux/netfilter_ipv4/ipt_ttl.h
index ee24fd8..37bee44 100644
--- a/include/linux/netfilter_ipv4/ipt_ttl.h
+++ b/include/linux/netfilter_ipv4/ipt_ttl.h
@@ -4,6 +4,8 @@
#ifndef _IPT_TTL_H
#define _IPT_TTL_H
+#include <linux/types.h>
+
enum {
IPT_TTL_EQ = 0, /* equals */
IPT_TTL_NE, /* not equals */
@@ -13,8 +15,8 @@ enum {
struct ipt_ttl_info {
- u_int8_t mode;
- u_int8_t ttl;
+ __u8 mode;
+ __u8 ttl;
};
diff --git a/include/linux/netfilter_ipv6/ip6t_HL.h b/include/linux/netfilter_ipv6/ip6t_HL.h
index afb7813..ebd8ead 100644
--- a/include/linux/netfilter_ipv6/ip6t_HL.h
+++ b/include/linux/netfilter_ipv6/ip6t_HL.h
@@ -5,6 +5,8 @@
#ifndef _IP6T_HL_H
#define _IP6T_HL_H
+#include <linux/types.h>
+
enum {
IP6T_HL_SET = 0,
IP6T_HL_INC,
@@ -14,8 +16,8 @@ enum {
#define IP6T_HL_MAXMODE IP6T_HL_DEC
struct ip6t_HL_info {
- u_int8_t mode;
- u_int8_t hop_limit;
+ __u8 mode;
+ __u8 hop_limit;
};
diff --git a/include/linux/netfilter_ipv6/ip6t_REJECT.h b/include/linux/netfilter_ipv6/ip6t_REJECT.h
index 6be6504..205ed62 100644
--- a/include/linux/netfilter_ipv6/ip6t_REJECT.h
+++ b/include/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -1,6 +1,8 @@
#ifndef _IP6T_REJECT_H
#define _IP6T_REJECT_H
+#include <linux/types.h>
+
enum ip6t_reject_with {
IP6T_ICMP6_NO_ROUTE,
IP6T_ICMP6_ADM_PROHIBITED,
@@ -12,7 +14,7 @@ enum ip6t_reject_with {
};
struct ip6t_reject_info {
- u_int32_t with; /* reject type */
+ __u32 with; /* reject type */
};
#endif /*_IP6T_REJECT_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_ah.h b/include/linux/netfilter_ipv6/ip6t_ah.h
index 17a745c..5da2b65 100644
--- a/include/linux/netfilter_ipv6/ip6t_ah.h
+++ b/include/linux/netfilter_ipv6/ip6t_ah.h
@@ -1,11 +1,13 @@
#ifndef _IP6T_AH_H
#define _IP6T_AH_H
+#include <linux/types.h>
+
struct ip6t_ah {
- u_int32_t spis[2]; /* Security Parameter Index */
- u_int32_t hdrlen; /* Header Length */
- u_int8_t hdrres; /* Test of the Reserved Filed */
- u_int8_t invflags; /* Inverse flags */
+ __u32 spis[2]; /* Security Parameter Index */
+ __u32 hdrlen; /* Header Length */
+ __u8 hdrres; /* Test of the Reserved Field */
+ __u8 invflags; /* Inverse flags */
};
#define IP6T_AH_SPI 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_frag.h b/include/linux/netfilter_ipv6/ip6t_frag.h
index 3724d08..b47f61b 100644
--- a/include/linux/netfilter_ipv6/ip6t_frag.h
+++ b/include/linux/netfilter_ipv6/ip6t_frag.h
@@ -1,11 +1,13 @@
#ifndef _IP6T_FRAG_H
#define _IP6T_FRAG_H
+#include <linux/types.h>
+
struct ip6t_frag {
- u_int32_t ids[2]; /* Security Parameter Index */
- u_int32_t hdrlen; /* Header Length */
- u_int8_t flags; /* */
- u_int8_t invflags; /* Inverse flags */
+ __u32 ids[2]; /* Fragment IDs */
+ __u32 hdrlen; /* Header Length */
+ __u8 flags; /* */
+ __u8 invflags; /* Inverse flags */
};
#define IP6T_FRAG_IDS 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_hl.h b/include/linux/netfilter_ipv6/ip6t_hl.h
index 5ef91b8..6e76dbc 100644
--- a/include/linux/netfilter_ipv6/ip6t_hl.h
+++ b/include/linux/netfilter_ipv6/ip6t_hl.h
@@ -5,6 +5,8 @@
#ifndef _IP6T_HL_H
#define _IP6T_HL_H
+#include <linux/types.h>
+
enum {
IP6T_HL_EQ = 0, /* equals */
IP6T_HL_NE, /* not equals */
@@ -14,8 +16,8 @@ enum {
struct ip6t_hl_info {
- u_int8_t mode;
- u_int8_t hop_limit;
+ __u8 mode;
+ __u8 hop_limit;
};
diff --git a/include/linux/netfilter_ipv6/ip6t_ipv6header.h b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
index 01dfd44..efae3a2 100644
--- a/include/linux/netfilter_ipv6/ip6t_ipv6header.h
+++ b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
@@ -8,10 +8,12 @@ on whether they contain certain headers */
#ifndef __IPV6HEADER_H
#define __IPV6HEADER_H
+#include <linux/types.h>
+
struct ip6t_ipv6header_info {
- u_int8_t matchflags;
- u_int8_t invflags;
- u_int8_t modeflag;
+ __u8 matchflags;
+ __u8 invflags;
+ __u8 modeflag;
};
#define MASK_HOPOPTS 128
diff --git a/include/linux/netfilter_ipv6/ip6t_mh.h b/include/linux/netfilter_ipv6/ip6t_mh.h
index 18549bc..a7729a5 100644
--- a/include/linux/netfilter_ipv6/ip6t_mh.h
+++ b/include/linux/netfilter_ipv6/ip6t_mh.h
@@ -1,10 +1,12 @@
#ifndef _IP6T_MH_H
#define _IP6T_MH_H
+#include <linux/types.h>
+
/* MH matching stuff */
struct ip6t_mh {
- u_int8_t types[2]; /* MH type range */
- u_int8_t invflags; /* Inverse flags */
+ __u8 types[2]; /* MH type range */
+ __u8 invflags; /* Inverse flags */
};
/* Values for "invflags" field in struct ip6t_mh. */
diff --git a/include/linux/netfilter_ipv6/ip6t_opts.h b/include/linux/netfilter_ipv6/ip6t_opts.h
index 62d89bc..17d419a 100644
--- a/include/linux/netfilter_ipv6/ip6t_opts.h
+++ b/include/linux/netfilter_ipv6/ip6t_opts.h
@@ -1,14 +1,16 @@
#ifndef _IP6T_OPTS_H
#define _IP6T_OPTS_H
+#include <linux/types.h>
+
#define IP6T_OPTS_OPTSNR 16
struct ip6t_opts {
- u_int32_t hdrlen; /* Header Length */
- u_int8_t flags; /* */
- u_int8_t invflags; /* Inverse flags */
- u_int16_t opts[IP6T_OPTS_OPTSNR]; /* opts */
- u_int8_t optsnr; /* Nr of OPts */
+ __u32 hdrlen; /* Header Length */
+ __u8 flags; /* */
+ __u8 invflags; /* Inverse flags */
+ __u16 opts[IP6T_OPTS_OPTSNR]; /* opts */
+ __u8 optsnr; /* Number of options */
};
#define IP6T_OPTS_LEN 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_rt.h b/include/linux/netfilter_ipv6/ip6t_rt.h
index ab91bfd..7605a5f 100644
--- a/include/linux/netfilter_ipv6/ip6t_rt.h
+++ b/include/linux/netfilter_ipv6/ip6t_rt.h
@@ -1,18 +1,19 @@
#ifndef _IP6T_RT_H
#define _IP6T_RT_H
+#include <linux/types.h>
/*#include <linux/in6.h>*/
#define IP6T_RT_HOPS 16
struct ip6t_rt {
- u_int32_t rt_type; /* Routing Type */
- u_int32_t segsleft[2]; /* Segments Left */
- u_int32_t hdrlen; /* Header Length */
- u_int8_t flags; /* */
- u_int8_t invflags; /* Inverse flags */
+ __u32 rt_type; /* Routing Type */
+ __u32 segsleft[2]; /* Segments Left */
+ __u32 hdrlen; /* Header Length */
+ __u8 flags; /* */
+ __u8 invflags; /* Inverse flags */
struct in6_addr addrs[IP6T_RT_HOPS]; /* Hops */
- u_int8_t addrnr; /* Nr of Addresses */
+ __u8 addrnr; /* Nr of Addresses */
};
#define IP6T_RT_TYP 0x01
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 2cfa4bc..d4bb6f5 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -247,6 +247,35 @@ struct tc_gred_sopt {
__u16 pad1;
};
+/* CHOKe section */
+
+enum {
+ TCA_CHOKE_UNSPEC,
+ TCA_CHOKE_PARMS,
+ TCA_CHOKE_STAB,
+ __TCA_CHOKE_MAX,
+};
+
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
+
+struct tc_choke_qopt {
+ __u32 limit; /* Hard queue length (packets) */
+ __u32 qth_min; /* Min average threshold (packets) */
+ __u32 qth_max; /* Max average threshold (packets) */
+ unsigned char Wlog; /* log(W) */
+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
+ unsigned char Scell_log; /* cell size for idle damping */
+ unsigned char flags; /* see RED flags */
+};
+
+struct tc_choke_xstats {
+ __u32 early; /* Early drops */
+ __u32 pdrop; /* Drops due to queue limits */
+ __u32 other; /* Drops due to drop() calls */
+ __u32 marked; /* Marked packets */
+ __u32 matched; /* Drops due to flow match */
+};
+
/* HTB section */
#define TC_HTB_NUMPRIO 8
#define TC_HTB_MAXDEPTH 8
@@ -481,4 +510,16 @@ struct tc_drr_stats {
__u32 deficit;
};
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+struct tc_mqprio_qopt {
+ __u8 num_tc;
+ __u8 prio_tc_map[TC_QOPT_BITMASK + 1];
+ __u8 hw;
+ __u16 count[TC_QOPT_MAX_QUEUE];
+ __u16 offset[TC_QOPT_MAX_QUEUE];
+};
+
#endif
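
tc_mqprio_qopt describes a priority-to-traffic-class mapping plus a contiguous TX-queue range per class. A sketch (illustrative values, not from the patch) of a two-class setup: priorities 0-3 on TC0 over queues 0-3, priorities 4-7 on TC1 over queues 4-7:

    #include <linux/pkt_sched.h>
    #include <string.h>

    static void fill_mqprio_two_classes(struct tc_mqprio_qopt *qopt)
    {
            int prio;

            memset(qopt, 0, sizeof(*qopt));
            qopt->num_tc = 2;
            qopt->hw = 0;                   /* software mapping only */
            for (prio = 0; prio <= TC_QOPT_BITMASK; prio++)
                    qopt->prio_tc_map[prio] = prio < 4 ? 0 : 1;
            qopt->count[0] = 4;  qopt->offset[0] = 0;   /* TC0: queues 0-3 */
            qopt->count[1] = 4;  qopt->offset[1] = 4;   /* TC1: queues 4-7 */
    }
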
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bf221d6..31f02d0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1801,6 +1801,15 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
skb = skb->prev)
+#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
+ for (skb = (queue)->prev, tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
+
+#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
+ for (tmp = skb->prev; \
+ skb != (struct sk_buff *)(queue); \
+ skb = tmp, tmp = skb->prev)
static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
@@ -1868,7 +1877,7 @@ extern void skb_split(struct sk_buff *skb,
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
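
The point of the new _safe reverse-walk macros is that tmp caches skb->prev before the loop body runs, so the current skb may be unlinked and freed mid-walk. A minimal kernel-context sketch (hypothetical helper; assumes the caller holds queue->lock):

    #include <linux/skbuff.h>

    /* Trim the queue from the tail down to max_len packets.
     * Safe because tmp already holds skb->prev when skb is freed. */
    static void prune_queue_tail(struct sk_buff_head *queue, __u32 max_len)
    {
            struct sk_buff *skb, *tmp;

            skb_queue_reverse_walk_safe(queue, skb, tmp) {
                    if (skb_queue_len(queue) <= max_len)
                            break;
                    __skb_unlink(skb, queue);
                    kfree_skb(skb);
            }
    }
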
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 930fdd2..b93d6f5 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -350,6 +350,7 @@ struct xfrm_usersa_info {
#define XFRM_STATE_WILDRECV 8
#define XFRM_STATE_ICMP 16
#define XFRM_STATE_AF_UNSPEC 32
+#define XFRM_STATE_ALIGN4 64
};
struct xfrm_usersa_id {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1322695..679a049 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1790,8 +1790,9 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
/**
* ieee80211_channel_to_frequency - convert channel number to frequency
* @chan: channel number
+ * @band: band, necessary due to channel number overlap
*/
-extern int ieee80211_channel_to_frequency(int chan);
+extern int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
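
The new band argument exists because channel numbers alone are ambiguous between bands. A standalone sketch of the arithmetic involved (simplified; the in-tree helper covers more cases, and the enum values here are stand-ins):

    enum band { BAND_2GHZ, BAND_5GHZ };

    static int chan_to_freq(int chan, enum band band)
    {
            if (band == BAND_2GHZ) {
                    if (chan == 14)
                            return 2484;            /* Japan-only special case */
                    return 2407 + chan * 5;         /* channels 1-13 */
            }
            return 5000 + chan * 5;                 /* 5 GHz plan */
    }
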
diff --git a/include/net/dst.h b/include/net/dst.h
index 93b0310..23b564d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -40,24 +40,10 @@ struct dst_entry {
struct rcu_head rcu_head;
struct dst_entry *child;
struct net_device *dev;
- short error;
- short obsolete;
- int flags;
-#define DST_HOST 0x0001
-#define DST_NOXFRM 0x0002
-#define DST_NOPOLICY 0x0004
-#define DST_NOHASH 0x0008
-#define DST_NOCACHE 0x0010
+ struct dst_ops *ops;
+ unsigned long _metrics;
unsigned long expires;
-
- unsigned short header_len; /* more space at head required */
- unsigned short trailer_len; /* space to reserve at tail */
-
- unsigned int rate_tokens;
- unsigned long rate_last; /* rate limiting for ICMP */
-
struct dst_entry *path;
-
struct neighbour *neighbour;
struct hh_cache *hh;
#ifdef CONFIG_XFRM
@@ -68,17 +54,16 @@ struct dst_entry {
int (*input)(struct sk_buff*);
int (*output)(struct sk_buff*);
- struct dst_ops *ops;
-
- u32 _metrics[RTAX_MAX];
-
-#ifdef CONFIG_NET_CLS_ROUTE
+ short error;
+ short obsolete;
+ unsigned short header_len; /* more space at head required */
+ unsigned short trailer_len; /* space to reserve at tail */
+#ifdef CONFIG_IP_ROUTE_CLASSID
__u32 tclassid;
#else
__u32 __pad2;
#endif
-
/*
* Align __refcnt to a 64 bytes alignment
* (L1_CACHE_SIZE would be too much)
@@ -93,6 +78,12 @@ struct dst_entry {
atomic_t __refcnt; /* client references */
int __use;
unsigned long lastuse;
+ int flags;
+#define DST_HOST 0x0001
+#define DST_NOXFRM 0x0002
+#define DST_NOPOLICY 0x0004
+#define DST_NOHASH 0x0008
+#define DST_NOCACHE 0x0010
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
@@ -103,10 +94,70 @@ struct dst_entry {
#ifdef __KERNEL__
+extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+extern const u32 dst_default_metrics[RTAX_MAX];
+
+#define DST_METRICS_READ_ONLY 0x1UL
+#define __DST_METRICS_PTR(Y) \
+ ((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
+#define DST_METRICS_PTR(X) __DST_METRICS_PTR((X)->_metrics)
+
+static inline bool dst_metrics_read_only(const struct dst_entry *dst)
+{
+ return dst->_metrics & DST_METRICS_READ_ONLY;
+}
+
+extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+
+static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
+{
+ unsigned long val = dst->_metrics;
+ if (!(val & DST_METRICS_READ_ONLY))
+ __dst_destroy_metrics_generic(dst, val);
+}
+
+static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
+{
+ unsigned long p = dst->_metrics;
+
+ if (p & DST_METRICS_READ_ONLY)
+ return dst->ops->cow_metrics(dst, p);
+ return __DST_METRICS_PTR(p);
+}
+
+/* This may only be invoked before the entry has reached global
+ * visibility.
+ */
+static inline void dst_init_metrics(struct dst_entry *dst,
+ const u32 *src_metrics,
+ bool read_only)
+{
+ dst->_metrics = ((unsigned long) src_metrics) |
+ (read_only ? DST_METRICS_READ_ONLY : 0);
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+ u32 *dst_metrics = dst_metrics_write_ptr(dest);
+
+ if (dst_metrics) {
+ u32 *src_metrics = DST_METRICS_PTR(src);
+
+ memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+ }
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+ return DST_METRICS_PTR(dst);
+}
+
static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
- return dst->_metrics[metric-1];
+ u32 *p = DST_METRICS_PTR(dst);
+
+ return p[metric-1];
}
static inline u32
@@ -131,22 +182,10 @@ dst_metric_advmss(const struct dst_entry *dst)
static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
- dst->_metrics[metric-1] = val;
-}
+ u32 *p = dst_metrics_write_ptr(dst);
-static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics)
-{
- memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32));
-}
-
-static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
-{
- dst_import_metrics(dest, src->_metrics);
-}
-
-static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
-{
- return dst->_metrics;
+ if (p)
+ p[metric-1] = val;
}
static inline u32
@@ -181,8 +220,6 @@ static inline u32
dst_allfrag(const struct dst_entry *dst)
{
int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
- /* Yes, _exactly_. This is paranoia. */
- barrier();
return ret;
}
@@ -315,7 +352,7 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
}
extern int dst_discard(struct sk_buff *skb);
-extern void * dst_alloc(struct dst_ops * ops);
+extern void *dst_alloc(struct dst_ops * ops, int initial_ref);
extern void __dst_free(struct dst_entry * dst);
extern struct dst_entry *dst_destroy(struct dst_entry * dst);
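
The _metrics conversion replaces a per-dst u32 array with a tagged pointer: metrics arrays are word-aligned, so bit 0 is free to mark the array read-only/shared; readers mask the bit off, and writers copy-on-write through ops->cow_metrics first. A standalone sketch of the tagging itself (userspace types, illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define METRICS_READ_ONLY 0x1UL

    static inline uint32_t *metrics_ptr(unsigned long m)
    {
            return (uint32_t *)(m & ~METRICS_READ_ONLY);    /* strip the tag */
    }

    static inline bool metrics_read_only(unsigned long m)
    {
            return m & METRICS_READ_ONLY;
    }

    static inline unsigned long metrics_init(const uint32_t *array, bool ro)
    {
            /* The array is at least 4-byte aligned, so bit 0 is unused. */
            return (unsigned long)array | (ro ? METRICS_READ_ONLY : 0);
    }
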
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 21a320b..dc07463 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -18,6 +18,7 @@ struct dst_ops {
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
unsigned int (*default_mtu)(const struct dst_entry *);
+ u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev, int how);
diff --git a/include/net/flow.h b/include/net/flow.h
index 240b7f3..1ae901f 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -48,7 +48,8 @@ struct flowi {
__u8 proto;
__u8 flags;
-#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_PRECOW_METRICS 0x02
union {
struct {
__be16 sport;
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 6e991e0..f0698b9 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -45,7 +45,4 @@ extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int icmp_init(void);
extern void icmp_out_count(struct net *net, unsigned char type);
-/* Move into dst.h ? */
-extern int xrlim_allow(struct dst_entry *dst, int timeout);
-
#endif /* _ICMP_H */
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index af49f8a..b0be5fb 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -178,6 +178,11 @@ struct ieee80211_radiotap_header {
*
* Number of unicast retries a transmitted frame used.
*
+ * IEEE80211_RADIOTAP_MCS u8, u8, u8 unitless
+ *
+ * Contains a bitmap of known fields/flags, the flags, and
+ * the MCS index.
+ *
*/
enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_TSFT = 0,
@@ -199,6 +204,8 @@ enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_RTS_RETRIES = 16,
IEEE80211_RADIOTAP_DATA_RETRIES = 17,
+ IEEE80211_RADIOTAP_MCS = 19,
+
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
@@ -245,6 +252,24 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_F_TX_CTS 0x0002 /* used cts 'protection' */
#define IEEE80211_RADIOTAP_F_TX_RTS 0x0004 /* used rts/cts handshake */
+
+/* For IEEE80211_RADIOTAP_MCS */
+#define IEEE80211_RADIOTAP_MCS_HAVE_BW 0x01
+#define IEEE80211_RADIOTAP_MCS_HAVE_MCS 0x02
+#define IEEE80211_RADIOTAP_MCS_HAVE_GI 0x04
+#define IEEE80211_RADIOTAP_MCS_HAVE_FMT 0x08
+#define IEEE80211_RADIOTAP_MCS_HAVE_FEC 0x10
+
+#define IEEE80211_RADIOTAP_MCS_BW_MASK 0x03
+#define IEEE80211_RADIOTAP_MCS_BW_20 0
+#define IEEE80211_RADIOTAP_MCS_BW_40 1
+#define IEEE80211_RADIOTAP_MCS_BW_20L 2
+#define IEEE80211_RADIOTAP_MCS_BW_20U 3
+#define IEEE80211_RADIOTAP_MCS_SGI 0x04
+#define IEEE80211_RADIOTAP_MCS_FMT_GF 0x08
+#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10
+
+
/* Ugly macro to convert literal channel numbers into their MHz equivalents
 * There are certainly some conditions that will break this (like feeding it '30')
* but they shouldn't arise since nothing talks on channel 30. */
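
The MCS field is three consecutive u8s: a 'known' bitmap, the flags, and the MCS index. A hedged sketch of consuming it (assumes the IEEE80211_RADIOTAP_MCS_* constants above are in scope and p points at the field after normal radiotap argument alignment):

    #include <stdio.h>

    static void dump_radiotap_mcs(const unsigned char *p)
    {
            unsigned char known = p[0], flags = p[1], mcs = p[2];

            if (known & IEEE80211_RADIOTAP_MCS_HAVE_BW)
                    printf("bw code %d\n", flags & IEEE80211_RADIOTAP_MCS_BW_MASK);
            if (known & IEEE80211_RADIOTAP_MCS_HAVE_GI)
                    printf("short GI: %d\n", !!(flags & IEEE80211_RADIOTAP_MCS_SGI));
            if (known & IEEE80211_RADIOTAP_MCS_HAVE_MCS)
                    printf("MCS index %u\n", mcs);
    }
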
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 8181498..6e6dfd7 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -219,7 +219,13 @@ static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
- return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
+ __u8 flags = 0;
+
+ if (inet_sk(sk)->transparent)
+ flags |= FLOWI_FLAG_ANYSRC;
+ if (sk->sk_protocol == IPPROTO_TCP)
+ flags |= FLOWI_FLAG_PRECOW_METRICS;
+ return flags;
}
#endif /* _INET_SOCK_H */
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 599d96e..e6dd8da6 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -11,15 +11,20 @@
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
#include <net/ipv6.h>
#include <asm/atomic.h>
-struct inetpeer_addr {
+struct inetpeer_addr_base {
union {
- __be32 a4;
- __be32 a6[4];
+ __be32 a4;
+ __be32 a6[4];
};
- __u16 family;
+};
+
+struct inetpeer_addr {
+ struct inetpeer_addr_base addr;
+ __u16 family;
};
struct inet_peer {
@@ -33,15 +38,22 @@ struct inet_peer {
atomic_t refcnt;
/*
* Once inet_peer is queued for deletion (refcnt == -1), following fields
- * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
- * We can share memory with rcu_head to keep inet_peer small
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp, metrics
+ * We can share memory with rcu_head to help keep inet_peer small.
*/
union {
struct {
- atomic_t rid; /* Frag reception counter */
- atomic_t ip_id_count; /* IP ID for the next packet */
- __u32 tcp_ts;
- __u32 tcp_ts_stamp;
+ atomic_t rid; /* Frag reception counter */
+ atomic_t ip_id_count; /* IP ID for the next packet */
+ __u32 tcp_ts;
+ __u32 tcp_ts_stamp;
+ u32 metrics[RTAX_MAX];
+ u32 rate_tokens; /* rate limiting for ICMP */
+ unsigned long rate_last;
+ unsigned long pmtu_expires;
+ u32 pmtu_orig;
+ u32 pmtu_learned;
+ struct inetpeer_addr_base redirect_learned;
};
struct rcu_head rcu;
};
@@ -49,6 +61,13 @@ struct inet_peer {
void inet_initpeers(void) __init;
+#define INETPEER_METRICS_NEW (~(u32) 0)
+
+static inline bool inet_metrics_new(const struct inet_peer *p)
+{
+ return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+}
+
/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create);
@@ -56,7 +75,7 @@ static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
{
struct inetpeer_addr daddr;
- daddr.a4 = v4daddr;
+ daddr.addr.a4 = v4daddr;
daddr.family = AF_INET;
return inet_getpeer(&daddr, create);
}
@@ -65,13 +84,14 @@ static inline struct inet_peer *inet_getpeer_v6(struct in6_addr *v6daddr, int cr
{
struct inetpeer_addr daddr;
- ipv6_addr_copy((struct in6_addr *)daddr.a6, v6daddr);
+ ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
daddr.family = AF_INET6;
return inet_getpeer(&daddr, create);
}
/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
+extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
/*
* temporary check to make sure we don't access rid, ip_id_count, tcp_ts,
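
With metrics, ICMP rate state and PMTU data moving into inet_peer, a lookup-then-check pattern emerges. A kernel-context sketch (hypothetical helper): fetch a peer and use its metrics only once initialized; INETPEER_METRICS_NEW in the RTAX_LOCK slot marks a freshly allocated array:

    #include <net/inetpeer.h>

    static u32 peer_rtt_or_zero(__be32 daddr)
    {
            struct inet_peer *peer = inet_getpeer_v4(daddr, 1);
            u32 rtt = 0;

            if (peer) {
                    if (!inet_metrics_new(peer))    /* metrics initialized? */
                            rtt = peer->metrics[RTAX_RTT - 1];
                    inet_putpeer(peer);
            }
            return rtt;
    }
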
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 708ff7c..46a6e8a 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -108,6 +108,7 @@ struct rt6_info {
u32 rt6i_flags;
struct rt6key rt6i_src;
u32 rt6i_metric;
+ u32 rt6i_peer_genid;
struct inet6_dev *rt6i_idev;
struct inet_peer *rt6i_peer;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 07bdb5e..523a170 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -55,7 +55,7 @@ struct fib_nh {
int nh_weight;
int nh_power;
#endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
__u32 nh_tclassid;
#endif
int nh_oif;
@@ -77,7 +77,7 @@ struct fib_info {
int fib_protocol;
__be32 fib_prefsrc;
u32 fib_priority;
- u32 fib_metrics[RTAX_MAX];
+ u32 *fib_metrics;
#define fib_mtu fib_metrics[RTAX_MTU-1]
#define fib_window fib_metrics[RTAX_WINDOW-1]
#define fib_rtt fib_metrics[RTAX_RTT-1]
@@ -96,12 +96,15 @@ struct fib_info {
struct fib_rule;
#endif
+struct fib_table;
struct fib_result {
unsigned char prefixlen;
unsigned char nh_sel;
unsigned char type;
unsigned char scope;
struct fib_info *fi;
+ struct fib_table *table;
+ struct list_head *fa_head;
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rule *r;
#endif
@@ -155,9 +158,6 @@ extern int fib_table_delete(struct fib_table *, struct fib_config *);
extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
extern int fib_table_flush(struct fib_table *table);
-extern void fib_table_select_default(struct fib_table *table,
- const struct flowi *flp,
- struct fib_result *res);
extern void fib_free_table(struct fib_table *tb);
@@ -201,8 +201,8 @@ static inline int fib_lookup(struct net *net, const struct flowi *flp,
extern int __net_init fib4_rules_init(struct net *net);
extern void __net_exit fib4_rules_exit(struct net *net);
-#ifdef CONFIG_NET_CLS_ROUTE
-extern u32 fib_rules_tclass(struct fib_result *res);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+extern u32 fib_rules_tclass(const struct fib_result *res);
#endif
extern int fib_lookup(struct net *n, struct flowi *flp, struct fib_result *res);
@@ -218,8 +218,7 @@ extern void ip_fib_init(void);
extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
struct net_device *dev, __be32 *spec_dst,
u32 *itag, u32 mark);
-extern void fib_select_default(struct net *net, const struct flowi *flp,
- struct fib_result *res);
+extern void fib_select_default(struct fib_result *res);
/* Exported by fib_semantics.c */
extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
@@ -229,13 +228,13 @@ extern int fib_sync_up(struct net_device *dev);
extern __be32 __fib_res_prefsrc(struct fib_result *res);
extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
-/* Exported by fib_{hash|trie}.c */
-extern void fib_hash_init(void);
-extern struct fib_table *fib_hash_table(u32 id);
+/* Exported by fib_trie.c */
+extern void fib_trie_init(void);
+extern struct fib_table *fib_trie_table(u32 id);
-static inline void fib_combine_itag(u32 *itag, struct fib_result *res)
+static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
u32 rtag;
#endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index b7bbd6c..5d75fea 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -28,6 +28,80 @@
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif
+#include <net/net_namespace.h> /* Network namespace */
+
+/*
+ * Generic accessor for the per-netns ipvs struct
+ */
+static inline struct netns_ipvs *net_ipvs(struct net* net)
+{
+ return net->ipvs;
+}
+/*
+ * Get the net ptr from the skb in traffic cases;
+ * use skb_sknet when the call comes from userland (ioctl or netlink)
+ */
+static inline struct net *skb_net(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+ /*
+ * Debug only: try the most likely source first,
+ * then fall through and BUG() if no net pointer can be found.
+ */
+ if (likely(skb->dev && skb->dev->nd_net))
+ return dev_net(skb->dev);
+ if (skb_dst(skb)->dev)
+ return dev_net(skb_dst(skb)->dev);
+ WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
+ __func__, __LINE__);
+ if (likely(skb->sk && skb->sk->sk_net))
+ return sock_net(skb->sk);
+ pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+ __func__, __LINE__);
+ BUG();
+#else
+ return dev_net(skb->dev ? : skb_dst(skb)->dev);
+#endif
+#else
+ return &init_net;
+#endif
+}
+
+static inline struct net *skb_sknet(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+ /* Start with the most likely hit */
+ if (likely(skb->sk && skb->sk->sk_net))
+ return sock_net(skb->sk);
+ WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
+ __func__, __LINE__);
+ if (likely(skb->dev && skb->dev->nd_net))
+ return dev_net(skb->dev);
+ pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+ __func__, __LINE__);
+ BUG();
+#else
+ return sock_net(skb->sk);
+#endif
+#else
+ return &init_net;
+#endif
+}
+/*
+ * This one is needed for single_open_net since net is stored directly in
+ * private, not as a struct, i.e. seq_file_net can't be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+ return (struct net *)seq->private;
+#else
+ return &init_net;
+#endif
+}
/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;
@@ -258,6 +332,23 @@ struct ip_vs_seq {
before last resized pkt */
};
+/*
+ * counters per cpu
+ */
+struct ip_vs_counters {
+ __u32 conns; /* connections scheduled */
+ __u32 inpkts; /* incoming packets */
+ __u32 outpkts; /* outgoing packets */
+ __u64 inbytes; /* incoming bytes */
+ __u64 outbytes; /* outgoing bytes */
+};
+/*
+ * Stats per cpu
+ */
+struct ip_vs_cpu_stats {
+ struct ip_vs_counters ustats;
+ struct u64_stats_sync syncp;
+};
/*
* IPVS statistics objects
@@ -279,17 +370,34 @@ struct ip_vs_estimator {
};
struct ip_vs_stats {
- struct ip_vs_stats_user ustats; /* statistics */
+ struct ip_vs_stats_user ustats; /* statistics */
struct ip_vs_estimator est; /* estimator */
-
- spinlock_t lock; /* spin lock */
+ struct ip_vs_cpu_stats *cpustats; /* per cpu counters */
+ spinlock_t lock; /* spin lock */
};
+/*
+ * Helper macros for the per-CPU counters,
+ * e.g. ipvs->tot_stats->ustats.count
+ */
+#define IPVS_STAT_INC(ipvs, count) \
+ __this_cpu_inc((ipvs)->ustats->count)
+
+#define IPVS_STAT_ADD(ipvs, count, value) \
+ do {\
+ write_seqcount_begin(per_cpu_ptr((ipvs)->ustats_seq, \
+ raw_smp_processor_id())); \
+ __this_cpu_add((ipvs)->ustats->count, value); \
+ write_seqcount_end(per_cpu_ptr((ipvs)->ustats_seq, \
+ raw_smp_processor_id())); \
+ } while (0)
+
struct dst_entry;
struct iphdr;
struct ip_vs_conn;
struct ip_vs_app;
struct sk_buff;
+struct ip_vs_proto_data;
struct ip_vs_protocol {
struct ip_vs_protocol *next;
@@ -297,21 +405,22 @@ struct ip_vs_protocol {
u16 protocol;
u16 num_states;
int dont_defrag;
- atomic_t appcnt; /* counter of proto app incs */
- int *timeout_table; /* protocol timeout table */
void (*init)(struct ip_vs_protocol *pp);
void (*exit)(struct ip_vs_protocol *pp);
+ void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
+ void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
int (*conn_schedule)(int af, struct sk_buff *skb,
- struct ip_vs_protocol *pp,
+ struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp);
struct ip_vs_conn *
(*conn_in_get)(int af,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -319,7 +428,6 @@ struct ip_vs_protocol {
struct ip_vs_conn *
(*conn_out_get)(int af,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -337,11 +445,11 @@ struct ip_vs_protocol {
int (*state_transition)(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp);
+ struct ip_vs_proto_data *pd);
- int (*register_app)(struct ip_vs_app *inc);
+ int (*register_app)(struct net *net, struct ip_vs_app *inc);
- void (*unregister_app)(struct ip_vs_app *inc);
+ void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
int (*app_conn_bind)(struct ip_vs_conn *cp);
@@ -350,14 +458,26 @@ struct ip_vs_protocol {
int offset,
const char *msg);
- void (*timeout_change)(struct ip_vs_protocol *pp, int flags);
+ void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
+};
- int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to);
+/*
+ * protocol data per netns
+ */
+struct ip_vs_proto_data {
+ struct ip_vs_proto_data *next;
+ struct ip_vs_protocol *pp;
+ int *timeout_table; /* protocol timeout table */
+ atomic_t appcnt; /* counter of proto app incs. */
+ struct tcp_states_t *tcp_state_table;
};
-extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+ unsigned short proto);
struct ip_vs_conn_param {
+ struct net *net;
const union nf_inet_addr *caddr;
const union nf_inet_addr *vaddr;
__be16 cport;
@@ -375,16 +495,19 @@ struct ip_vs_conn_param {
*/
struct ip_vs_conn {
struct list_head c_list; /* hashed list heads */
-
+#ifdef CONFIG_NET_NS
+ struct net *net; /* Name space */
+#endif
/* Protocol, addresses and port numbers */
- u16 af; /* address family */
- union nf_inet_addr caddr; /* client address */
- union nf_inet_addr vaddr; /* virtual address */
- union nf_inet_addr daddr; /* destination address */
- volatile __u32 flags; /* status flags */
- __be16 cport;
- __be16 vport;
- __be16 dport;
+ u16 af; /* address family */
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __u32 fwmark; /* Firewall mark from skb */
+ union nf_inet_addr caddr; /* client address */
+ union nf_inet_addr vaddr; /* virtual address */
+ union nf_inet_addr daddr; /* destination address */
+ volatile __u32 flags; /* status flags */
__u16 protocol; /* Which protocol (TCP/UDP) */
/* counter and timer */
@@ -422,10 +545,38 @@ struct ip_vs_conn {
struct ip_vs_seq in_seq; /* incoming seq. struct */
struct ip_vs_seq out_seq; /* outgoing seq. struct */
+ const struct ip_vs_pe *pe;
char *pe_data;
__u8 pe_data_len;
};
+/*
+ * To save some memory in the conn table when namespace support is disabled.
+ */
+static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
+{
+#ifdef CONFIG_NET_NS
+ return cp->net;
+#else
+ return &init_net;
+#endif
+}
+static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ cp->net = net;
+#endif
+}
+
+static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
+ struct net *net)
+{
+#ifdef CONFIG_NET_NS
+ return cp->net == net;
+#else
+ return 1;
+#endif
+}
/*
* Extended internal versions of struct ip_vs_service_user and
@@ -485,6 +636,7 @@ struct ip_vs_service {
unsigned flags; /* service status flags */
unsigned timeout; /* persistent timeout in ticks */
__be32 netmask; /* grouping granularity */
+ struct net *net;
struct list_head destinations; /* real server d-linked list */
__u32 num_dests; /* number of servers */
@@ -510,8 +662,8 @@ struct ip_vs_dest {
struct list_head d_list; /* for table with all the dests */
u16 af; /* address family */
- union nf_inet_addr addr; /* IP address of the server */
__be16 port; /* port number of the server */
+ union nf_inet_addr addr; /* IP address of the server */
volatile unsigned flags; /* dest status flags */
atomic_t conn_flags; /* flags to copy to conn */
atomic_t weight; /* server weight */
@@ -538,8 +690,8 @@ struct ip_vs_dest {
/* for virtual service */
struct ip_vs_service *svc; /* service it belongs to */
__u16 protocol; /* which protocol (TCP/UDP) */
- union nf_inet_addr vaddr; /* virtual IP address */
__be16 vport; /* virtual port number */
+ union nf_inet_addr vaddr; /* virtual IP address */
__u32 vfwmark; /* firewall mark of service */
};
@@ -674,13 +826,14 @@ enum {
IP_VS_DIR_LAST,
};
-static inline void ip_vs_conn_fill_param(int af, int protocol,
+static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
const union nf_inet_addr *caddr,
__be16 cport,
const union nf_inet_addr *vaddr,
__be16 vport,
struct ip_vs_conn_param *p)
{
+ p->net = net;
p->af = af;
p->protocol = protocol;
p->caddr = caddr;
@@ -695,7 +848,6 @@ struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -703,7 +855,6 @@ struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse);
@@ -719,14 +870,14 @@ extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr,
__be16 dport, unsigned flags,
- struct ip_vs_dest *dest);
+ struct ip_vs_dest *dest, __u32 fwmark);
extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
extern const char * ip_vs_state_name(__u16 proto, int state);
-extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
+extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(void);
+extern void ip_vs_random_dropentry(struct net *net);
extern int ip_vs_conn_init(void);
extern void ip_vs_conn_cleanup(void);
@@ -796,12 +947,12 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
* (from ip_vs_app.c)
*/
#define IP_VS_APP_MAX_PORTS 8
-extern int register_ip_vs_app(struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct ip_vs_app *app);
+extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port);
+extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
+ __u16 proto, __u16 port);
extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
@@ -814,15 +965,27 @@ void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
void ip_vs_unbind_pe(struct ip_vs_service *svc);
int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
-extern struct ip_vs_pe *ip_vs_pe_get(const char *name);
-extern void ip_vs_pe_put(struct ip_vs_pe *pe);
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
+
+static inline void ip_vs_pe_get(const struct ip_vs_pe *pe)
+{
+ if (pe && pe->module)
+ __module_get(pe->module);
+}
+
+static inline void ip_vs_pe_put(const struct ip_vs_pe *pe)
+{
+ if (pe && pe->module)
+ module_put(pe->module);
+}
/*
* IPVS protocol functions (from ip_vs_proto.c)
*/
extern int ip_vs_protocol_init(void);
extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(int flags);
+extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
extern int *ip_vs_create_timeout_table(int *table, int size);
extern int
ip_vs_set_state_timeout(int *table, int num, const char *const *names,
@@ -852,26 +1015,21 @@ extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
extern struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp, int *ignored);
+ struct ip_vs_proto_data *pd, int *ignored);
extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp);
+ struct ip_vs_proto_data *pd);
/*
* IPVS control data and functions (from ip_vs_ctl.c)
*/
-extern int sysctl_ip_vs_cache_bypass;
-extern int sysctl_ip_vs_expire_nodest_conn;
-extern int sysctl_ip_vs_expire_quiescent_template;
-extern int sysctl_ip_vs_sync_threshold[2];
-extern int sysctl_ip_vs_nat_icmp_send;
-extern int sysctl_ip_vs_conntrack;
-extern int sysctl_ip_vs_snat_reroute;
extern struct ip_vs_stats ip_vs_stats;
extern const struct ctl_path net_vs_ctl_path[];
+extern int sysctl_ip_vs_sync_ver;
+extern void ip_vs_sync_switch_mode(struct net *net, int mode);
extern struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport);
static inline void ip_vs_service_put(struct ip_vs_service *svc)
@@ -880,7 +1038,7 @@ static inline void ip_vs_service_put(struct ip_vs_service *svc)
}
extern struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *daddr, __be16 dport);
extern int ip_vs_use_count_inc(void);
@@ -888,8 +1046,9 @@ extern void ip_vs_use_count_dec(void);
extern int ip_vs_control_init(void);
extern void ip_vs_control_cleanup(void);
extern struct ip_vs_dest *
-ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
- const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
+ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
+ __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+ __u16 protocol, __u32 fwmark);
extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
@@ -897,14 +1056,12 @@ extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
* IPVS sync daemon data and function prototypes
* (from ip_vs_sync.c)
*/
-extern volatile int ip_vs_sync_state;
-extern volatile int ip_vs_master_syncid;
-extern volatile int ip_vs_backup_syncid;
-extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid);
-extern int stop_sync_thread(int state);
-extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
+extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
+ __u8 syncid);
+extern int stop_sync_thread(struct net *net, int state);
+extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp);
+extern int ip_vs_sync_init(void);
+extern void ip_vs_sync_cleanup(void);
/*
@@ -912,8 +1069,8 @@ extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
*/
extern int ip_vs_estimator_init(void);
extern void ip_vs_estimator_cleanup(void);
-extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats);
+extern void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats);
extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
/*
@@ -952,14 +1109,14 @@ extern int ip_vs_icmp_xmit_v6
* we are loaded. Just set ip_vs_drop_rate to 'n' and
* we start to drop 1/rate of the packets
*/
-extern int ip_vs_drop_rate;
-extern int ip_vs_drop_counter;
-static __inline__ int ip_vs_todrop(void)
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
{
- if (!ip_vs_drop_rate) return 0;
- if (--ip_vs_drop_counter > 0) return 0;
- ip_vs_drop_counter = ip_vs_drop_rate;
+ if (!ipvs->drop_rate)
+ return 0;
+ if (--ipvs->drop_counter > 0)
+ return 0;
+ ipvs->drop_counter = ipvs->drop_rate;
return 1;
}
@@ -1047,9 +1204,9 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
* Netfilter connection tracking
* (from ip_vs_nfct.c)
*/
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
- return sysctl_ip_vs_conntrack;
+ return ipvs->sysctl_conntrack;
}
extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1062,7 +1219,7 @@ extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
#else
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
return 0;
}
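
The conversion replaces file-scope IPVS globals with per-netns state reached through the skb. A kernel-context sketch of the pattern a traffic-path caller now follows (hypothetical wrapper):

    #include <net/ip_vs.h>

    static int example_should_drop(const struct sk_buff *skb)
    {
            struct net *net = skb_net(skb);         /* traffic path: from skb */
            struct netns_ipvs *ipvs = net_ipvs(net);

            return ip_vs_todrop(ipvs);              /* per-netns drop state */
    }
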
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 62c0ce2..8fcd169 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -341,6 +341,9 @@ struct ieee80211_bss_conf {
* the off-channel channel when a remain-on-channel offload is done
* in hardware -- normal packets still flow and are expected to be
* handled properly by the device.
+ * @IEEE80211_TX_INTFL_TKIP_MIC_FAILURE: Marks this packet to be used for TKIP
+ * testing. It will be sent out with an incorrect Michael MIC key to allow
+ * TKIP countermeasures to be tested.
*
* Note: If you have to add new flags to the enumeration, then don't
* forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
@@ -370,6 +373,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTL_LDPC = BIT(22),
IEEE80211_TX_CTL_STBC = BIT(23) | BIT(24),
IEEE80211_TX_CTL_TX_OFFCHAN = BIT(25),
+ IEEE80211_TX_INTFL_TKIP_MIC_FAILURE = BIT(26),
};
#define IEEE80211_TX_CTL_STBC_SHIFT 23
@@ -1069,6 +1073,13 @@ enum ieee80211_tkip_key_type {
* to decrypt group addressed frames, then IBSS RSN support is still
* possible but software crypto will be used. Advertise the wiphy flag
* only in that case.
+ *
+ * @IEEE80211_HW_AP_LINK_PS: When operating in AP mode the device
+ * autonomously manages the PS status of connected stations. When
+ * this flag is set mac80211 will not trigger PS mode for connected
+ * stations based on the PM bit of incoming frames.
+ * Use ieee80211_start_ps()/ieee80211_end_ps() to manually configure
+ * the PS mode of connected stations.
*/
enum ieee80211_hw_flags {
IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -1093,6 +1104,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
IEEE80211_HW_SUPPORTS_CQM_RSSI = 1<<20,
IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
+ IEEE80211_HW_AP_LINK_PS = 1<<22,
};
/**
@@ -1147,6 +1159,17 @@ enum ieee80211_hw_flags {
* @napi_weight: weight used for NAPI polling. You must specify an
* appropriate value here if a napi_poll operation is provided
* by your driver.
+ *
+ * @max_rx_aggregation_subframes: maximum buffer size (number of
+ * sub-frames) to be used for A-MPDU block ack receiver
+ * aggregation.
+ * This is only relevant if the device has restrictions on the
+ * number of subframes, if it relies on mac80211 to do reordering
+ * it shouldn't be set.
+ *
+ * @max_tx_aggregation_subframes: maximum number of subframes in an
+ * aggregate an HT driver will transmit, used by the peer as a
+ * hint to size its reorder buffer.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -1165,6 +1188,8 @@ struct ieee80211_hw {
u8 max_rates;
u8 max_report_rates;
u8 max_rate_tries;
+ u8 max_rx_aggregation_subframes;
+ u8 max_tx_aggregation_subframes;
};
/**
@@ -1688,7 +1713,9 @@ enum ieee80211_ampdu_mlme_action {
* station, AP, IBSS/WDS/mesh peer etc. This callback can sleep.
*
* @sta_notify: Notifies low level driver about power state transition of an
- * associated station, AP, IBSS/WDS/mesh peer etc. Must be atomic.
+ * associated station, AP, IBSS/WDS/mesh peer etc. For a VIF operating
+ * in AP mode, this callback will not be called when the flag
+ * %IEEE80211_HW_AP_LINK_PS is set. Must be atomic.
*
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
@@ -1723,6 +1750,10 @@ enum ieee80211_ampdu_mlme_action {
* ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
* is the first frame we expect to perform the action on. Notice
* that TX/RX_STOP can pass NULL for this parameter.
+ * The @buf_size parameter is only valid when the action is set to
+ * %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
+ * buffer size (number of subframes) for this session -- aggregates
+ * containing more subframes than this may not be transmitted to the peer.
* Returns a negative error code on failure.
* The callback can sleep.
*
@@ -1825,7 +1856,8 @@ struct ieee80211_ops {
int (*ampdu_action)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size);
int (*get_survey)(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void (*rfkill_poll)(struct ieee80211_hw *hw);
@@ -2113,6 +2145,48 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
local_bh_enable();
}
+/**
+ * ieee80211_sta_ps_transition - PS transition for connected sta
+ *
+ * When operating in AP mode with the %IEEE80211_HW_AP_LINK_PS
+ * flag set, use this function to inform mac80211 about a connected station
+ * entering/leaving PS mode.
+ *
+ * This function may not be called in IRQ context or with softirqs enabled.
+ *
+ * Calls to this function for a single hardware must be synchronized against
+ * each other.
+ *
+ * The function returns -EINVAL when the requested PS mode is already set.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ */
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start);
+
+/**
+ * ieee80211_sta_ps_transition_ni - PS transition for connected sta
+ * (in process context)
+ *
+ * Like ieee80211_sta_ps_transition() but can be called in process context
+ * (internally disables bottom halves). Concurrent call restriction still
+ * applies.
+ *
+ * @sta: currently connected sta
+ * @start: start or stop PS
+ */
+static inline int ieee80211_sta_ps_transition_ni(struct ieee80211_sta *sta,
+ bool start)
+{
+ int ret;
+
+ local_bh_disable();
+ ret = ieee80211_sta_ps_transition(sta, start);
+ local_bh_enable();
+
+ return ret;
+}
+
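
A hedged sketch of the intended calling pattern for a driver that sets %IEEE80211_HW_AP_LINK_PS; the work-item context and the pm_bit argument are assumptions for illustration.

/* Hedged sketch: reporting a station's PS transition from a driver's
 * process-context RX work item. */
#include <net/mac80211.h>

static void example_report_ps(struct ieee80211_sta *sta, bool pm_bit)
{
	int ret;

	/* safe here: the _ni() variant disables bottom halves internally */
	ret = ieee80211_sta_ps_transition_ni(sta, pm_bit);
	if (ret == -EINVAL)
		pr_debug("station already in the requested PS state\n");
}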
/*
* The TX headroom reserved by mac80211 for its own tx_status functions.
* This is enough for the radiotap header.
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 1bf812b..b3b4a34 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -20,6 +20,7 @@
#include <net/netns/conntrack.h>
#endif
#include <net/netns/xfrm.h>
+#include <net/netns/ip_vs.h>
struct proc_dir_entry;
struct net_device;
@@ -94,6 +95,7 @@ struct net {
#ifdef CONFIG_XFRM
struct netns_xfrm xfrm;
#endif
+ struct netns_ipvs *ipvs;
};
diff --git a/include/net/netevent.h b/include/net/netevent.h
index e82b7ba..22b239c 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -21,7 +21,6 @@ struct netevent_redirect {
enum netevent_notif_type {
NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
- NETEVENT_PMTU_UPDATE, /* arg is struct dst_entry ptr */
NETEVENT_REDIRECT, /* arg is struct netevent_redirect ptr */
};
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index d85cff1..d0d1337 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -50,11 +50,24 @@ union nf_conntrack_expect_proto {
/* per conntrack: application helper private data */
union nf_conntrack_help {
/* insert conntrack helper private data (master) here */
+#if defined(CONFIG_NF_CONNTRACK_FTP) || defined(CONFIG_NF_CONNTRACK_FTP_MODULE)
struct nf_ct_ftp_master ct_ftp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_PPTP) || \
+ defined(CONFIG_NF_CONNTRACK_PPTP_MODULE)
struct nf_ct_pptp_master ct_pptp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_H323) || \
+ defined(CONFIG_NF_CONNTRACK_H323_MODULE)
struct nf_ct_h323_master ct_h323_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SANE) || \
+ defined(CONFIG_NF_CONNTRACK_SANE_MODULE)
struct nf_ct_sane_master ct_sane_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
struct nf_ct_sip_master ct_sip_info;
+#endif
};
#include <linux/types.h>
@@ -116,14 +129,14 @@ struct nf_conn {
u_int32_t secmark;
#endif
- /* Storage reserved for other modules: */
- union nf_conntrack_proto proto;
-
/* Extensions */
struct nf_ct_ext *ext;
#ifdef CONFIG_NET_NS
struct net *ct_net;
#endif
+
+ /* Storage reserved for other modules, must be the last member */
+ union nf_conntrack_proto proto;
};
static inline struct nf_conn *
@@ -189,9 +202,9 @@ extern void nf_ct_l3proto_module_put(unsigned short l3proto);
* Allocate a hashtable of hlist_head (if nulls == 0),
* or hlist_nulls_head (if nulls == 1)
*/
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
+extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
-extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
+extern void nf_ct_free_hashtable(void *hash, unsigned int size);
extern struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, u16 zone,
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 349cefe..4283508 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -23,12 +23,17 @@ struct nf_conntrack_ecache {
static inline struct nf_conntrack_ecache *
nf_ct_ecache_find(const struct nf_conn *ct)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
+#else
+ return NULL;
+#endif
}
static inline struct nf_conntrack_ecache *
nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
@@ -45,6 +50,9 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
e->expmask = expmask;
}
return e;
+#else
+ return NULL;
+#endif
};
#ifdef CONFIG_NF_CONNTRACK_EVENTS
@@ -59,7 +67,7 @@ struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern struct nf_ct_event_notifier *nf_conntrack_event_cb;
+extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
@@ -156,7 +164,7 @@ struct nf_exp_event_notifier {
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern struct nf_exp_event_notifier *nf_expect_event_cb;
+extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 0772d29..2dcf317 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -7,10 +7,19 @@
enum nf_ct_ext_id {
NF_CT_EXT_HELPER,
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
NF_CT_EXT_NAT,
+#endif
NF_CT_EXT_ACCT,
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
NF_CT_EXT_ECACHE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
NF_CT_EXT_ZONE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ NF_CT_EXT_TSTAMP,
+#endif
NF_CT_EXT_NUM,
};
@@ -19,6 +28,7 @@ enum nf_ct_ext_id {
#define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
+#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 32c305d..f1c1311 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -63,4 +63,10 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
extern int nf_conntrack_helper_init(void);
extern void nf_conntrack_helper_fini(void);
+extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int timeout);
+
#endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index a754761..e8010f4 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -73,7 +73,7 @@ struct nf_conntrack_l3proto {
struct module *me;
};
-extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
+extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
/* Protocol registration. */
extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
new file mode 100644
index 0000000..fc9c82b
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -0,0 +1,65 @@
+#ifndef _NF_CONNTRACK_TSTAMP_H
+#define _NF_CONNTRACK_TSTAMP_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_tstamp {
+ u_int64_t start;
+ u_int64_t stop;
+};
+
+static inline
+struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP);
+#else
+ return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ struct net *net = nf_ct_net(ct);
+
+ if (!net->ct.sysctl_tstamp)
+ return NULL;
+
+ return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp);
+#else
+ return NULL;
+#endif
+};
+
+static inline bool nf_ct_tstamp_enabled(struct net *net)
+{
+ return net->ct.sysctl_tstamp != 0;
+}
+
+static inline void nf_ct_set_tstamp(struct net *net, bool enable)
+{
+ net->ct.sysctl_tstamp = enable;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+extern int nf_conntrack_tstamp_init(struct net *net);
+extern void nf_conntrack_tstamp_fini(struct net *net);
+#else
+static inline int nf_conntrack_tstamp_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void nf_conntrack_tstamp_fini(struct net *net)
+{
+ return;
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
+
+#endif /* _NF_CONNTRACK_TSTAMP_H */
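
A hedged sketch of consuming the new extension; it assumes start/stop hold nanosecond timestamps (the units are not fixed by this header) and returns 0 when the extension is absent or the flow has not yet closed.

#include <net/netfilter/nf_conntrack_timestamp.h>

static u64 example_ct_duration(const struct nf_conn *ct)
{
	const struct nf_conn_tstamp *tstamp = nf_conn_tstamp_find(ct);

	if (!tstamp || !tstamp->stop)
		return 0;	/* no extension, or flow still open */
	return tstamp->stop - tstamp->start;
}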
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index f5f09f03..aff80b1 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -56,7 +56,9 @@ struct nf_nat_multi_range_compat {
/* per conntrack: nat application helper private data */
union nf_conntrack_nat_help {
/* insert nat helper private data here */
+#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE)
struct nf_nat_pptp nat_pptp_info;
+#endif
};
struct nf_conn;
@@ -84,7 +86,11 @@ extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
{
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
+#else
+ return NULL;
+#endif
}
#else /* !__KERNEL__: iptables wants this to compile. */
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 33602ab..3dc7b98 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -21,9 +21,9 @@ static inline int nf_nat_initialized(struct nf_conn *ct,
enum nf_nat_manip_type manip)
{
if (manip == IP_NAT_MANIP_SRC)
- return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+ return ct->status & IPS_SRC_NAT_DONE;
else
- return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+ return ct->status & IPS_DST_NAT_DONE;
}
struct nlattr;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 373f1a9..8a3906a 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -856,18 +856,27 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
#define NLA_PUT_BE16(skb, attrtype, value) \
NLA_PUT_TYPE(skb, __be16, attrtype, value)
+#define NLA_PUT_NET16(skb, attrtype, value) \
+ NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
#define NLA_PUT_U32(skb, attrtype, value) \
NLA_PUT_TYPE(skb, u32, attrtype, value)
#define NLA_PUT_BE32(skb, attrtype, value) \
NLA_PUT_TYPE(skb, __be32, attrtype, value)
+#define NLA_PUT_NET32(skb, attrtype, value) \
+ NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
#define NLA_PUT_U64(skb, attrtype, value) \
NLA_PUT_TYPE(skb, u64, attrtype, value)
#define NLA_PUT_BE64(skb, attrtype, value) \
NLA_PUT_TYPE(skb, __be64, attrtype, value)
+#define NLA_PUT_NET64(skb, attrtype, value) \
+ NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
#define NLA_PUT_STRING(skb, attrtype, value) \
NLA_PUT(skb, attrtype, strlen(value) + 1, value)
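
A hedged sketch of the new NET-byte-order helpers in use; EXAMPLE_ATTR_ADDR is a made-up attribute id, defined here only so the fragment stands alone.

#include <net/netlink.h>

#define EXAMPLE_ATTR_ADDR 1	/* invented attribute id */

static int example_fill_info(struct sk_buff *skb, __be32 addr)
{
	/* tags the attribute with NLA_F_NET_BYTEORDER automatically */
	NLA_PUT_NET32(skb, EXAMPLE_ATTR_ADDR, addr);
	return 0;

nla_put_failure:	/* NLA_PUT_* jumps here when the skb is full */
	return -EMSGSIZE;
}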
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index d4958d4..341eb08 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -21,15 +21,15 @@ struct netns_ct {
int sysctl_events;
unsigned int sysctl_events_retry_timeout;
int sysctl_acct;
+ int sysctl_tstamp;
int sysctl_checksum;
unsigned int sysctl_log_invalid; /* Log invalid packets */
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_header;
struct ctl_table_header *acct_sysctl_header;
+ struct ctl_table_header *tstamp_sysctl_header;
struct ctl_table_header *event_sysctl_header;
#endif
- int hash_vmalloc;
- int expect_vmalloc;
char *slabname;
};
#endif
diff --git a/include/net/netns/ip_vs.h b/include/net/netns/ip_vs.h
new file mode 100644
index 0000000..259ebac
--- /dev/null
+++ b/include/net/netns/ip_vs.h
@@ -0,0 +1,143 @@
+/*
+ * IP Virtual Server
+ * Data structure for network namespace
+ *
+ */
+
+#ifndef IP_VS_H_
+#define IP_VS_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/list_nulls.h>
+#include <linux/ip_vs.h>
+#include <asm/atomic.h>
+#include <linux/in.h>
+
+struct ip_vs_stats;
+struct ip_vs_sync_buff;
+struct ctl_table_header;
+
+struct netns_ipvs {
+ int gen; /* Generation */
+ /*
+ * Hash table: for real service lookups
+ */
+ #define IP_VS_RTAB_BITS 4
+ #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
+ #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
+
+ struct list_head rs_table[IP_VS_RTAB_SIZE];
+ /* ip_vs_app */
+ struct list_head app_list;
+ struct mutex app_mutex;
+ struct lock_class_key app_key; /* mutex debugging */
+
+ /* ip_vs_proto */
+ #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */
+ struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
+ /* ip_vs_proto_tcp */
+#ifdef CONFIG_IP_VS_PROTO_TCP
+ #define TCP_APP_TAB_BITS 4
+ #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
+ #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
+ struct list_head tcp_apps[TCP_APP_TAB_SIZE];
+ spinlock_t tcp_app_lock;
+#endif
+ /* ip_vs_proto_udp */
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ #define UDP_APP_TAB_BITS 4
+ #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
+ #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
+ struct list_head udp_apps[UDP_APP_TAB_SIZE];
+ spinlock_t udp_app_lock;
+#endif
+ /* ip_vs_proto_sctp */
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ #define SCTP_APP_TAB_BITS 4
+ #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
+ #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
+ /* Hash table for SCTP application incarnations */
+ struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
+ spinlock_t sctp_app_lock;
+#endif
+ /* ip_vs_conn */
+ atomic_t conn_count; /* connection counter */
+
+ /* ip_vs_ctl */
+ struct ip_vs_stats *tot_stats; /* Statistics & est. */
+ struct ip_vs_cpu_stats __percpu *cpustats; /* Stats per cpu */
+ seqcount_t *ustats_seq; /* u64 read retry */
+
+ int num_services; /* no of virtual services */
+ /* 1/rate drop and drop-entry variables */
+ struct delayed_work defense_work; /* Work handler */
+ int drop_rate;
+ int drop_counter;
+ atomic_t dropentry;
+ /* locks in ctl.c */
+ spinlock_t dropentry_lock; /* drop entry handling */
+ spinlock_t droppacket_lock; /* drop packet handling */
+ spinlock_t securetcp_lock; /* state and timeout tables */
+ rwlock_t rs_lock; /* real services table */
+ /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
+ struct lock_class_key ctl_key; /* ctl_mutex debugging */
+ /* Trash for destinations */
+ struct list_head dest_trash;
+ /* Service counters */
+ atomic_t ftpsvc_counter;
+ atomic_t nullsvc_counter;
+
+ /* sys-ctl struct */
+ struct ctl_table_header *sysctl_hdr;
+ struct ctl_table *sysctl_tbl;
+ /* sysctl variables */
+ int sysctl_amemthresh;
+ int sysctl_am_droprate;
+ int sysctl_drop_entry;
+ int sysctl_drop_packet;
+ int sysctl_secure_tcp;
+#ifdef CONFIG_IP_VS_NFCT
+ int sysctl_conntrack;
+#endif
+ int sysctl_snat_reroute;
+ int sysctl_sync_ver;
+ int sysctl_cache_bypass;
+ int sysctl_expire_nodest_conn;
+ int sysctl_expire_quiescent_template;
+ int sysctl_sync_threshold[2];
+ int sysctl_nat_icmp_send;
+
+ /* ip_vs_lblc */
+ int sysctl_lblc_expiration;
+ struct ctl_table_header *lblc_ctl_header;
+ struct ctl_table *lblc_ctl_table;
+ /* ip_vs_lblcr */
+ int sysctl_lblcr_expiration;
+ struct ctl_table_header *lblcr_ctl_header;
+ struct ctl_table *lblcr_ctl_table;
+ /* ip_vs_est */
+ struct list_head est_list; /* estimator list */
+ spinlock_t est_lock;
+ struct timer_list est_timer; /* Estimation timer */
+ /* ip_vs_sync */
+ struct list_head sync_queue;
+ spinlock_t sync_lock;
+ struct ip_vs_sync_buff *sync_buff;
+ spinlock_t sync_buff_lock;
+ struct sockaddr_in sync_mcast_addr;
+ struct task_struct *master_thread;
+ struct task_struct *backup_thread;
+ int send_mesg_maxlen;
+ int recv_mesg_maxlen;
+ volatile int sync_state;
+ volatile int master_syncid;
+ volatile int backup_syncid;
+ /* multicast interface name */
+ char master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+ /* net name space ptr */
+ struct net *net; /* Needed by timer routines */
+};
+
+#endif /* IP_VS_H_ */
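
For orientation, a minimal sketch of how per-namespace IPVS state is reached through the net->ipvs pointer added above; the accessor name follows the usual netns convention but is written here purely for illustration.

#include <net/net_namespace.h>
#include <net/netns/ip_vs.h>

static inline struct netns_ipvs *example_net_ipvs(struct net *net)
{
	return net->ipvs;
}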
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d68c3f1..e2e2ef5 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -43,7 +43,6 @@ struct netns_ipv4 {
struct xt_table *nat_table;
struct hlist_head *nat_bysource;
unsigned int nat_htable_size;
- int nat_vmalloced;
#endif
int sysctl_icmp_echo_ignore_all;
diff --git a/include/net/protocol.h b/include/net/protocol.h
index dc07495..6f7eb80 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -38,7 +38,7 @@ struct net_protocol {
void (*err_handler)(struct sk_buff *skb, u32 info);
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
+ u32 features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
@@ -57,7 +57,7 @@ struct inet6_protocol {
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
+ u32 features);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb);
diff --git a/include/net/route.h b/include/net/route.h
index 93e10c4..bf790c1 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -49,6 +49,7 @@
struct fib_nh;
struct inet_peer;
+struct fib_info;
struct rtable {
struct dst_entry dst;
@@ -68,7 +69,9 @@ struct rtable {
/* Miscellaneous cached information */
__be32 rt_spec_dst; /* RFC1122 specific destination */
+ u32 rt_peer_genid;
struct inet_peer *peer; /* long-living peer info */
+ struct fib_info *fi; /* for client ref to shared metrics */
};
static inline bool rt_is_input_route(struct rtable *rt)
@@ -180,6 +183,8 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
if (inet_sk(sk)->transparent)
fl.flags |= FLOWI_FLAG_ANYSRC;
+ if (protocol == IPPROTO_TCP)
+ fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
if (!dst || !src) {
err = __ip_route_output_key(net, rp, &fl);
@@ -207,6 +212,8 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol,
fl.proto = protocol;
if (inet_sk(sk)->transparent)
fl.flags |= FLOWI_FLAG_ANYSRC;
+ if (protocol == IPPROTO_TCP)
+ fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
ip_rt_put(*rp);
*rp = NULL;
security_sk_classify_flow(sk, &fl);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 160a407..16626a0 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -31,10 +31,12 @@ enum qdisc_state_t {
* following bits are only changed while qdisc lock is held
*/
enum qdisc___state_t {
- __QDISC___STATE_RUNNING,
+ __QDISC___STATE_RUNNING = 1,
+ __QDISC___STATE_THROTTLED = 2,
};
struct qdisc_size_table {
+ struct rcu_head rcu;
struct list_head list;
struct tc_sizespec szopts;
int refcnt;
@@ -46,14 +48,13 @@ struct Qdisc {
struct sk_buff * (*dequeue)(struct Qdisc *dev);
unsigned flags;
#define TCQ_F_BUILTIN 1
-#define TCQ_F_THROTTLED 2
-#define TCQ_F_INGRESS 4
-#define TCQ_F_CAN_BYPASS 8
-#define TCQ_F_MQROOT 16
+#define TCQ_F_INGRESS 2
+#define TCQ_F_CAN_BYPASS 4
+#define TCQ_F_MQROOT 8
#define TCQ_F_WARN_NONWC (1 << 16)
int padded;
struct Qdisc_ops *ops;
- struct qdisc_size_table *stab;
+ struct qdisc_size_table __rcu *stab;
struct list_head list;
u32 handle;
u32 parent;
@@ -78,25 +79,43 @@ struct Qdisc {
unsigned long state;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
- unsigned long __state;
+ unsigned int __state;
struct gnet_stats_queue qstats;
struct rcu_head rcu_head;
spinlock_t busylock;
};
-static inline bool qdisc_is_running(struct Qdisc *qdisc)
+static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
- return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
- return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ if (qdisc_is_running(qdisc))
+ return false;
+ qdisc->__state |= __QDISC___STATE_RUNNING;
+ return true;
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
- __clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+ qdisc->__state &= ~__QDISC___STATE_RUNNING;
+}
+
+static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
+{
+ return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false;
+}
+
+static inline void qdisc_throttled(struct Qdisc *qdisc)
+{
+ qdisc->__state |= __QDISC___STATE_THROTTLED;
+}
+
+static inline void qdisc_unthrottled(struct Qdisc *qdisc)
+{
+ qdisc->__state &= ~__QDISC___STATE_THROTTLED;
}
struct Qdisc_class_ops {
@@ -331,8 +350,8 @@ extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
struct Qdisc_ops *ops, u32 parentid);
-extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
- struct qdisc_size_table *stab);
+extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
@@ -411,12 +430,20 @@ enum net_xmit_qdisc_t {
#define net_xmit_drop_count(e) (1)
#endif
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
- if (sch->stab)
- qdisc_calculate_pkt_len(skb, sch->stab);
+ struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
+
+ if (stab)
+ __qdisc_calculate_pkt_len(skb, stab);
#endif
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ qdisc_calculate_pkt_len(skb, sch);
return sch->enqueue(skb, sch);
}
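
A hedged sketch of the pattern these non-atomic helpers support; it assumes the caller already serializes runners (the qdisc root lock), which is what makes plain bit manipulation of __state safe.

#include <net/sch_generic.h>

static void example_run_qdisc(struct Qdisc *q)
{
	if (!qdisc_run_begin(q))
		return;		/* another CPU is already running it */

	/* ... dequeue and transmit packets here ... */

	qdisc_run_end(q);
}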
diff --git a/include/net/sock.h b/include/net/sock.h
index bc1cf7d8..e3893a2 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1191,7 +1191,7 @@ extern void sk_filter_release_rcu(struct rcu_head *rcu);
static inline void sk_filter_release(struct sk_filter *fp)
{
if (atomic_dec_and_test(&fp->refcnt))
- call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
+ call_rcu(&fp->rcu, sk_filter_release_rcu);
}
static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38509f0..adfe6db 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -196,6 +196,9 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
+/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
+#define TCP_INIT_CWND 10
+
extern struct inet_timewait_death_row tcp_death_row;
/* sysctl variables for tcp */
@@ -799,15 +802,6 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
-/*
- * Convert RFC 3390 larger initial window into an equivalent number of packets.
- * This is based on the numbers specified in RFC 5681, 3.1.
- */
-static inline u32 rfc3390_bytes_to_packets(const u32 smss)
-{
- return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
-}
-
extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
@@ -1404,7 +1398,7 @@ extern struct request_sock_ops tcp6_request_sock_ops;
extern void tcp_v4_destroy_sock(struct sock *sk);
extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
diff --git a/include/net/udp.h b/include/net/udp.h
index bb967dd..e82f3a8 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -245,5 +245,5 @@ extern void udp4_proc_exit(void);
extern void udp_init(void);
extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
#endif /* _UDP_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b9f385d..1f6e8a0 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -36,6 +36,7 @@
#define XFRM_PROTO_ROUTING IPPROTO_ROUTING
#define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
+#define XFRM_ALIGN4(len) (((len) + 3) & ~3)
#define XFRM_ALIGN8(len) (((len) + 7) & ~7)
#define MODULE_ALIAS_XFRM_MODE(family, encap) \
MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
diff --git a/kernel/audit.c b/kernel/audit.c
index e495624..162e88e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -74,6 +74,8 @@ static int audit_initialized;
int audit_enabled;
int audit_ever_enabled;
+EXPORT_SYMBOL_GPL(audit_enabled);
+
/* Default state when kernel boots without any parameters. */
static int audit_default;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0caa59f..0587c5c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -134,6 +134,10 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
irq_set_thread_affinity(desc);
}
#endif
+ if (desc->affinity_notify) {
+ kref_get(&desc->affinity_notify->kref);
+ schedule_work(&desc->affinity_notify->work);
+ }
desc->status |= IRQ_AFFINITY_SET;
raw_spin_unlock_irqrestore(&desc->lock, flags);
return 0;
@@ -155,6 +159,79 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+static void irq_affinity_notify(struct work_struct *work)
+{
+ struct irq_affinity_notify *notify =
+ container_of(work, struct irq_affinity_notify, work);
+ struct irq_desc *desc = irq_to_desc(notify->irq);
+ cpumask_var_t cpumask;
+ unsigned long flags;
+
+ if (!desc)
+ goto out;
+
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ goto out;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ if (desc->status & IRQ_MOVE_PENDING)
+ cpumask_copy(cpumask, desc->pending_mask);
+ else
+#endif
+ cpumask_copy(cpumask, desc->affinity);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ notify->notify(notify, cpumask);
+
+ free_cpumask_var(cpumask);
+out:
+ kref_put(&notify->kref, notify->release);
+}
+
+/**
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes
+ * @irq: Interrupt for which to enable/disable notification
+ * @notify: Context for notification, or %NULL to disable
+ * notification. Function pointers must be initialised;
+ * the other fields will be initialised by this function.
+ *
+ * Must be called in process context. Notification may only be enabled
+ * after the IRQ is allocated and must be disabled before the IRQ is
+ * freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_affinity_notify *old_notify;
+ unsigned long flags;
+
+ /* The release function is promised process context */
+ might_sleep();
+
+ if (!desc)
+ return -EINVAL;
+
+ /* Complete initialisation of *notify */
+ if (notify) {
+ notify->irq = irq;
+ kref_init(&notify->kref);
+ INIT_WORK(&notify->work, irq_affinity_notify);
+ }
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ old_notify = desc->affinity_notify;
+ desc->affinity_notify = notify;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ if (old_notify)
+ kref_put(&old_notify->kref, old_notify->release);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
+
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
* Generic version of the affinity autoselector.
@@ -1004,6 +1081,11 @@ void free_irq(unsigned int irq, void *dev_id)
if (!desc)
return;
+#ifdef CONFIG_SMP
+ if (WARN_ON(desc->affinity_notify))
+ desc->affinity_notify = NULL;
+#endif
+
chip_bus_lock(desc);
kfree(__free_irq(irq, dev_id));
chip_bus_sync_unlock(desc);
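
A hedged sketch of a consumer of the new notifier API; example_ctx and its callbacks are invented (the cpu_rmap glue added later in this patch is the in-tree user).

#include <linux/interrupt.h>
#include <linux/slab.h>

struct example_ctx {
	struct irq_affinity_notify notify;
};

static void example_notify(struct irq_affinity_notify *notify,
			   const cpumask_t *mask)
{
	/* react to the IRQ's new affinity mask here */
}

static void example_release(struct kref *ref)
{
	struct example_ctx *ctx =
		container_of(ref, struct example_ctx, notify.kref);
	kfree(ctx);
}

static int example_watch_irq(unsigned int irq, struct example_ctx *ctx)
{
	/* only the function pointers are ours to fill in; irq, kref
	 * and work are initialised by the core */
	ctx->notify.notify = example_notify;
	ctx->notify.release = example_release;
	return irq_set_affinity_notifier(irq, &ctx->notify);
}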
diff --git a/lib/Kconfig b/lib/Kconfig
index 0ee67e0..8334342 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -201,6 +201,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
depends on EXPERIMENTAL && BROKEN
+config CPU_RMAP
+ bool
+ depends on SMP
+
#
# Netlink attribute parsing support is select'ed if needed
#
diff --git a/lib/Makefile b/lib/Makefile
index cbb774f..b73ba01 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -110,6 +110,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
obj-$(CONFIG_AVERAGE) += average.o
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644
index 0000000..987acfa
--- /dev/null
+++ b/lib/cpu_rmap.c
@@ -0,0 +1,269 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpu_rmap.h>
+#ifdef CONFIG_GENERIC_HARDIRQS
+#include <linux/interrupt.h>
+#endif
+#include <linux/module.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities. This can be seen as a reverse-map of
+ * CPU affinity. However, we do not assume that the object affinities
+ * cover all CPUs in the system. For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+ struct cpu_rmap *rmap;
+ unsigned int cpu;
+ size_t obj_offset;
+
+ /* This is a silly number of objects, and we use u16 indices. */
+ if (size > 0xffff)
+ return NULL;
+
+ /* Offset of object pointer array from base structure */
+ obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+ sizeof(void *));
+
+ rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+ if (!rmap)
+ return NULL;
+
+ rmap->obj = (void **)((char *)rmap + obj_offset);
+
+ /* Initially assign CPUs to objects on a rota, since we have
+ * no idea where the objects are. Use infinite distance, so
+ * any object with known distance is preferable. Include the
+ * CPUs that are not present/online, since we definitely want
+ * any newly-hotplugged CPUs to have some object assigned.
+ */
+ for_each_possible_cpu(cpu) {
+ rmap->near[cpu].index = cpu % size;
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ }
+
+ rmap->size = size;
+ return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+ const struct cpumask *mask, u16 dist)
+{
+ int neigh;
+
+ for_each_cpu(neigh, mask) {
+ if (rmap->near[cpu].dist > dist &&
+ rmap->near[neigh].dist <= dist) {
+ rmap->near[cpu].index = rmap->near[neigh].index;
+ rmap->near[cpu].dist = dist;
+ return true;
+ }
+ }
+ return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+ unsigned index;
+ unsigned int cpu;
+
+ pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+ for_each_possible_cpu(cpu) {
+ index = rmap->near[cpu].index;
+ pr_info("cpu %d -> obj %u (distance %u)\n",
+ cpu, index, rmap->near[cpu].dist);
+ }
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object.
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+ u16 index;
+
+ BUG_ON(rmap->used >= rmap->size);
+ index = rmap->used++;
+ rmap->obj[index] = obj;
+ return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+ const struct cpumask *affinity)
+{
+ cpumask_var_t update_mask;
+ unsigned int cpu;
+
+ if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+ return -ENOMEM;
+
+ /* Invalidate distance for all CPUs for which this used to be
+ * the nearest object. Mark those CPUs for update.
+ */
+ for_each_online_cpu(cpu) {
+ if (rmap->near[cpu].index == index) {
+ rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+ cpumask_set_cpu(cpu, update_mask);
+ }
+ }
+
+ debug_print_rmap(rmap, "after invalidating old distances");
+
+ /* Set distance to 0 for all CPUs in the new affinity mask.
+ * Mark all CPUs within their NUMA nodes for update.
+ */
+ for_each_cpu(cpu, affinity) {
+ rmap->near[cpu].index = index;
+ rmap->near[cpu].dist = 0;
+ cpumask_or(update_mask, update_mask,
+ cpumask_of_node(cpu_to_node(cpu)));
+ }
+
+ debug_print_rmap(rmap, "after updating neighbours");
+
+ /* Update distances based on topology */
+ for_each_cpu(cpu, update_mask) {
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_thread_cpumask(cpu), 1))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ topology_core_cpumask(cpu), 2))
+ continue;
+ if (cpu_rmap_copy_neigh(rmap, cpu,
+ cpumask_of_node(cpu_to_node(cpu)), 3))
+ continue;
+ /* We could continue into NUMA node distances, but for now
+ * we give up.
+ */
+ }
+
+ debug_print_rmap(rmap, "after copying neighbours");
+
+ free_cpumask_var(update_mask);
+ return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+ struct irq_affinity_notify notify;
+ struct cpu_rmap *rmap;
+ u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs, and
+ * without holding any locks required by global workqueue items.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+ struct irq_glue *glue;
+ u16 index;
+
+ if (!rmap)
+ return;
+
+ for (index = 0; index < rmap->used; index++) {
+ glue = rmap->obj[index];
+ irq_set_affinity_notifier(glue->notify.irq, NULL);
+ }
+ irq_run_affinity_notifiers();
+
+ kfree(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+ struct irq_glue *glue =
+ container_of(notify, struct irq_glue, notify);
+ int rc;
+
+ rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+ if (rc)
+ pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+ struct irq_glue *glue =
+ container_of(ref, struct irq_glue, notify.kref);
+ kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+ struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ int rc;
+
+ if (!glue)
+ return -ENOMEM;
+ glue->notify.notify = irq_cpu_rmap_notify;
+ glue->notify.release = irq_cpu_rmap_release;
+ glue->rmap = rmap;
+ glue->index = cpu_rmap_add(rmap, glue);
+ rc = irq_set_affinity_notifier(irq, &glue->notify);
+ if (rc)
+ kfree(glue);
+ return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
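
A hedged end-to-end sketch of the intended driver flow for the IRQ glue above; the vector array and the error-handling style are illustrative only.

#include <linux/cpu_rmap.h>

static struct cpu_rmap *example_setup_rmap(const int *irqs, unsigned int nvec)
{
	struct cpu_rmap *rmap;
	unsigned int i;

	rmap = alloc_cpu_rmap(nvec, GFP_KERNEL);
	if (!rmap)
		return NULL;

	for (i = 0; i < nvec; i++) {
		if (irq_cpu_rmap_add(rmap, irqs[i])) {
			/* unregisters notifiers added so far, frees rmap */
			free_irq_cpu_rmap(rmap);
			return NULL;
		}
	}
	return rmap;		/* e.g. kept in the driver's private state */
}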
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6e64f7c..7850412 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -327,7 +327,7 @@ static void vlan_sync_address(struct net_device *dev,
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
- unsigned long old_features = vlandev->features;
+ u32 old_features = vlandev->features;
vlandev->features &= ~dev->vlan_features;
vlandev->features |= dev->features & dev->vlan_features;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 17c5ba7..29a54cc 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -59,7 +59,6 @@
* safely advertise a maxsize
* of 64k */
-#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
/**
* struct p9_trans_rdma - RDMA transport instance
*
diff --git a/net/Kconfig b/net/Kconfig
index 7284062..79cabf1 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -221,6 +221,12 @@ config RPS
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
+config RFS_ACCEL
+ boolean
+ depends on RPS && GENERIC_HARDIRQS
+ select CPU_RMAP
+ default y
+
config XPS
boolean
depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index d936aec..2de93d0 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
#
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index 3850a3e..1997725 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 71a91b3..6ce305b 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index 0ae81d0..0e9d435 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -52,7 +52,6 @@ static void emit_log_char(struct debug_log *debug_log, char c)
static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
{
- int printed_len;
va_list args;
static char debug_log_buf[256];
char *p;
@@ -62,8 +61,7 @@ static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
spin_lock_bh(&debug_log->lock);
va_start(args, fmt);
- printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
- fmt, args);
+ vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
va_end(args);
for (p = debug_log_buf; *p != 0; p++)
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/bat_debugfs.h
index 72df532..bc9cda3f 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/bat_debugfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index cd7bb51..f7b93a0 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index 7f186c0..02f1fa7 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index bbcd8f7..ad2ca92 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index ac54017..769c246 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 0065ffb..429a013 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 4585e65..2aa4391 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index b962982..50d3a59 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 5e728d0..55e527a 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4f95777..f2131f4 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -34,6 +34,12 @@
/* protect update critical side of if_list - but not the content */
static DEFINE_SPINLOCK(if_list_lock);
+
+static int batman_skb_recv(struct sk_buff *skb,
+ struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev);
+
static void hardif_free_rcu(struct rcu_head *rcu)
{
struct batman_if *batman_if;
@@ -549,8 +555,9 @@ out:
/* receive a packet with the batman ethertype coming on a hard
* interface */
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev)
+static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
{
struct bat_priv *bat_priv;
struct batman_packet *batman_packet;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 30ec3b8..ad19543 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -35,10 +35,6 @@ struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
void hardif_disable_interface(struct batman_if *batman_if);
void hardif_remove_interfaces(void);
-int batman_skb_recv(struct sk_buff *skb,
- struct net_device *dev,
- struct packet_type *ptype,
- struct net_device *orig_dev);
int hardif_min_mtu(struct net_device *soft_iface);
void update_min_mtu(struct net_device *soft_iface);
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 26e623e..fa26939 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 09216ad..eae2440 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -49,11 +49,6 @@ struct hashtable_t {
/* allocates and clears the hash */
struct hashtable_t *hash_new(int size);
-/* remove element if you already found the element you want to delete and don't
- * need the overhead to find it again with hash_remove(). But usually, you
- * don't want to use this function, as it fiddles with hash-internals. */
-void *hash_remove_element(struct hashtable_t *hash, struct element_t *elem);
-
/* free only the hashtable and the hash itself. */
void hash_destroy(struct hashtable_t *hash);
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index ecf6d7f..319a7cc 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include "icmp_socket.h"
#include "send.h"
-#include "types.h"
#include "hash.h"
#include "originator.h"
#include "hard-interface.h"
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index bf9b348..462b190 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -22,8 +22,6 @@
#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
-#include "types.h"
-
#define ICMP_SOCKET "socket"
void bat_socket_init(void);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b827f6a..06d956c 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -30,7 +30,6 @@
#include "translation-table.h"
#include "hard-interface.h"
#include "gateway_client.h"
-#include "types.h"
#include "vis.h"
#include "hash.h"
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 65106fb..e235d7b 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,9 +22,6 @@
#ifndef _NET_BATMAN_ADV_MAIN_H_
#define _NET_BATMAN_ADV_MAIN_H_
-/* Kernel Programming */
-#define LINUX
-
#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
"Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
#define DRIVER_DESC "B.A.T.M.A.N. advanced"
@@ -54,7 +51,6 @@
#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
-#define PACKBUFF_SIZE 2000
#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
#define VIS_INTERVAL 5000 /* 5 seconds */
@@ -96,15 +92,11 @@
#define DBG_ROUTES 2 /* route or hna added / changed / deleted */
#define DBG_ALL 3
-#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
-
/*
* Vis
*/
-/* #define VIS_SUBCLUSTERS_DISABLED */
-
/*
* Kernel headers
*/
@@ -158,13 +150,6 @@ static inline void bat_dbg(char type __always_unused,
}
#endif
-#define bat_warning(net_dev, fmt, arg...) \
- do { \
- struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
- pr_warning("%s: " fmt, _netdev->name, ## arg); \
- } while (0)
#define bat_info(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6b7fb6b..54863c9 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -247,7 +247,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
orig_node->hna_buff_len);
/* update bonding candidates, we could have lost
* some candidates. */
- update_bonding_candidates(bat_priv, orig_node);
+ update_bonding_candidates(orig_node);
}
}
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index d474ceb..8019fbd 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 2284e81..e757187 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -50,6 +50,7 @@
/* fragmentation defines */
#define UNI_FRAG_HEAD 0x01
+#define UNI_FRAG_LARGETAIL 0x02
struct batman_packet {
uint8_t packet_type;
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index defd37c..5bb6a61 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 6b0cb9a..0395b27 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 8828edd..8274140 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -28,7 +28,6 @@
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
-#include "types.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
@@ -433,8 +432,7 @@ static char count_real_packets(struct ethhdr *ethhdr,
}
/* copy primary address for bonding */
-static void mark_bonding_address(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
+static void mark_bonding_address(struct orig_node *orig_node,
struct orig_node *orig_neigh_node,
struct batman_packet *batman_packet)
@@ -447,8 +445,7 @@ static void mark_bonding_address(struct bat_priv *bat_priv,
}
/* mark possible bond.candidates in the neighbor list */
-void update_bonding_candidates(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
+void update_bonding_candidates(struct orig_node *orig_node)
{
int candidates;
int interference_candidate;
@@ -730,9 +727,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
update_orig(bat_priv, orig_node, ethhdr, batman_packet,
if_incoming, hna_buff, hna_buff_len, is_duplicate);
- mark_bonding_address(bat_priv, orig_node,
- orig_neigh_node, batman_packet);
- update_bonding_candidates(bat_priv, orig_node);
+ mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
+ update_bonding_candidates(orig_node);
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
@@ -810,13 +806,11 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
{
struct orig_node *orig_node;
struct icmp_packet_rr *icmp_packet;
- struct ethhdr *ethhdr;
struct batman_if *batman_if;
int ret;
uint8_t dstaddr[ETH_ALEN];
icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* add data to device queue */
if (icmp_packet->msg_type != ECHO_REQUEST) {
@@ -848,7 +842,6 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
return NET_RX_DROP;
icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig,
@@ -866,17 +859,15 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
}
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
- struct sk_buff *skb, size_t icmp_len)
+ struct sk_buff *skb)
{
struct orig_node *orig_node;
struct icmp_packet *icmp_packet;
- struct ethhdr *ethhdr;
struct batman_if *batman_if;
int ret;
uint8_t dstaddr[ETH_ALEN];
icmp_packet = (struct icmp_packet *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != ECHO_REQUEST) {
@@ -909,7 +900,6 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
return NET_RX_DROP;
icmp_packet = (struct icmp_packet *) skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig,
@@ -978,7 +968,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
/* TTL exceeded */
if (icmp_packet->ttl < 2)
- return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
+ return recv_icmp_ttl_exceeded(bat_priv, skb);
ret = NET_RX_DROP;
@@ -1001,7 +991,6 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
return NET_RX_DROP;
icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* decrement ttl */
icmp_packet->ttl--;
@@ -1193,7 +1182,7 @@ int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
dstaddr);
if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
- 2 * skb->len - hdr_size <= batman_if->net_dev->mtu) {
+ frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index f108f23..a09d16f 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,8 +22,6 @@
#ifndef _NET_BATMAN_ADV_ROUTING_H_
#define _NET_BATMAN_ADV_ROUTING_H_
-#include "types.h"
-
void slide_own_bcast_window(struct batman_if *batman_if);
void receive_bat_packet(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
@@ -42,7 +40,6 @@ int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
struct neigh_node *find_router(struct bat_priv *bat_priv,
struct orig_node *orig_node, struct batman_if *recv_if);
-void update_bonding_candidates(struct bat_priv *bat_priv,
- struct orig_node *orig_node);
+void update_bonding_candidates(struct orig_node *orig_node);
#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index b89b9f7..8314276 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -25,7 +25,6 @@
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
-#include "types.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
@@ -49,7 +48,7 @@ static unsigned long own_send_time(struct bat_priv *bat_priv)
}
/* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(struct bat_priv *bat_priv)
+static unsigned long forward_send_time(void)
{
return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}
@@ -356,7 +355,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
else
batman_packet->flags &= ~DIRECTLINK;
- send_time = forward_send_time(bat_priv);
+ send_time = forward_send_time();
add_bat_packet_to_list(bat_priv,
(unsigned char *)batman_packet,
sizeof(struct batman_packet) + hna_buff_len,
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index c4cefa8..b68c272 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,8 +22,6 @@
#ifndef _NET_BATMAN_ADV_SEND_H_
#define _NET_BATMAN_ADV_SEND_H_
-#include "types.h"
-
int send_skb_packet(struct sk_buff *skb,
struct batman_if *batman_if,
uint8_t *dst_addr);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e89ede1..bd088f8 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -26,7 +26,6 @@
#include "send.h"
#include "bat_debugfs.h"
#include "translation-table.h"
-#include "types.h"
#include "hash.h"
#include "gateway_common.h"
#include "gateway_client.h"
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 02b7733..e7b0e1a 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a633b5a4..7fb6726 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,7 +22,6 @@
#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
-#include "types.h"
#include "hash.h"
#include "originator.h"
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 10c4c5c..f19931c 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -22,8 +22,6 @@
#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
-#include "types.h"
-
int hna_local_init(struct bat_priv *bat_priv);
void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
void hna_local_remove(struct bat_priv *bat_priv,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index bf3f6f5..7270405 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index d1a6113..121b11d 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -39,8 +39,8 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
(struct unicast_frag_packet *)skb->data;
struct sk_buff *tmp_skb;
struct unicast_packet *unicast_packet;
- int hdr_len = sizeof(struct unicast_packet),
- uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
+ int hdr_len = sizeof(struct unicast_packet);
+ int uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
/* set skb to the first part and tmp_skb to the second part */
if (up->flags & UNI_FRAG_HEAD) {
@@ -229,7 +229,9 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
struct unicast_frag_packet *frag1, *frag2;
int uc_hdr_len = sizeof(struct unicast_packet);
int ucf_hdr_len = sizeof(struct unicast_frag_packet);
- int data_len = skb->len;
+ int data_len = skb->len - uc_hdr_len;
+ int large_tail = 0;
+ uint16_t seqno;
if (!bat_priv->primary_if)
goto dropped;
@@ -237,10 +239,11 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
if (!frag_skb)
goto dropped;
+ skb_reserve(frag_skb, ucf_hdr_len);
unicast_packet = (struct unicast_packet *) skb->data;
memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
- skb_split(skb, frag_skb, data_len / 2);
+ skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
@@ -258,13 +261,15 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
- frag1->flags |= UNI_FRAG_HEAD;
- frag2->flags &= ~UNI_FRAG_HEAD;
+ if (data_len & 1)
+ large_tail = UNI_FRAG_LARGETAIL;
+
+ frag1->flags = UNI_FRAG_HEAD | large_tail;
+ frag2->flags = large_tail;
- frag1->seqno = htons((uint16_t)atomic_inc_return(
- &batman_if->frag_seqno));
- frag2->seqno = htons((uint16_t)atomic_inc_return(
- &batman_if->frag_seqno));
+ seqno = atomic_add_return(2, &batman_if->frag_seqno);
+ frag1->seqno = htons(seqno - 1);
+ frag2->seqno = htons(seqno);
send_skb_packet(skb, batman_if, dstaddr);
send_skb_packet(frag_skb, batman_if, dstaddr);
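The sequence-number change above deserves a note: two independent atomic_inc_return() calls could be interleaved by another CPU sending fragments on the same interface, so the head and tail of a pair might not get adjacent numbers. A single atomic_add_return(2, ...) reserves both numbers at once. A minimal standalone sketch of the idiom, with a plain C11 counter standing in for batman_if->frag_seqno:

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint frag_seqno;

/* Reserve two consecutive seqnos in one atomic step: the head gets the
 * lower number, the tail the higher, and no other pair can land between. */
static void reserve_frag_pair(uint16_t *head, uint16_t *tail)
{
	unsigned int seqno = atomic_fetch_add(&frag_seqno, 2) + 2;

	*head = (uint16_t)(seqno - 1);
	*tail = (uint16_t)seqno;
}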
@@ -281,7 +286,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
struct unicast_packet *unicast_packet;
- struct orig_node *orig_node;
+ struct orig_node *orig_node = NULL;
struct batman_if *batman_if;
struct neigh_node *router;
int data_len = skb->len;
@@ -292,11 +297,6 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest))
orig_node = (struct orig_node *)gw_get_selected(bat_priv);
- else
- orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
- compare_orig,
- choose_orig,
- ethhdr->h_dest));
/* check for hna host */
if (!orig_node)
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index e32b786..8897308 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -22,6 +22,8 @@
#ifndef _NET_BATMAN_ADV_UNICAST_H_
#define _NET_BATMAN_ADV_UNICAST_H_
+#include "packet.h"
+
#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
@@ -32,4 +34,25 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
struct batman_if *batman_if, uint8_t dstaddr[]);
+static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
+{
+ struct unicast_frag_packet *unicast_packet;
+ int uneven_correction = 0;
+ unsigned int merged_size;
+
+ unicast_packet = (struct unicast_frag_packet *)skb->data;
+
+ if (unicast_packet->flags & UNI_FRAG_LARGETAIL) {
+ if (unicast_packet->flags & UNI_FRAG_HEAD)
+ uneven_correction = 1;
+ else
+ uneven_correction = -1;
+ }
+
+ merged_size = (skb->len - sizeof(struct unicast_frag_packet)) * 2;
+ merged_size += sizeof(struct unicast_packet) + uneven_correction;
+
+ return merged_size <= mtu;
+}
+
#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
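A worked example of frag_can_reassemble()'s size math, using illustrative header sizes (the real values come from the sizeof() expressions above):

/* Illustrative numbers only -- real sizes come from
 * sizeof(struct unicast_frag_packet) / sizeof(struct unicast_packet). */
static int merged_size_example(void)
{
	int data_len = 1401;              /* odd payload -> UNI_FRAG_LARGETAIL */
	int head_payload = data_len / 2;  /* 700 bytes land in the head frag   */
	int uc_hdr = 10;                  /* assumed plain unicast header size */

	/* Doubling the head's payload undercounts the odd original by one
	 * byte, hence uneven_correction = +1 for a LARGETAIL head. */
	return head_payload * 2 + uc_hdr + 1;   /* 1411 == data_len + uc_hdr */
}

The correction makes the MTU comparison exact for odd-length payloads instead of off by one in either direction.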
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index de1022c..7db9ad8 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 2c3b330..31b820d 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 5564435..1461b19 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -297,6 +297,21 @@ void br_netpoll_disable(struct net_bridge_port *p)
#endif
+static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ return br_add_if(br, slave_dev);
+}
+
+static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+ struct net_bridge *br = netdev_priv(dev);
+
+ return br_del_if(br, slave_dev);
+}
+
static const struct ethtool_ops br_ethtool_ops = {
.get_drvinfo = br_getinfo,
.get_link = ethtool_op_get_link,
@@ -326,6 +341,8 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_netpoll_cleanup = br_netpoll_cleanup,
.ndo_poll_controller = br_poll_controller,
#endif
+ .ndo_add_slave = br_add_slave,
+ .ndo_del_slave = br_del_slave,
};
static void br_dev_free(struct net_device *dev)
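With ndo_add_slave/ndo_del_slave wired up, enslaving a port no longer requires a bridge-specific entry point; any generic caller holding RTNL can drive it through the netdev ops. A hedged sketch of the calling convention (not code from this series):

/* Sketch: generic enslave helper built on the new ndo hooks.
 * Assumes the caller holds RTNL, as br_add_if() requires. */
static int set_master(struct net_device *master, struct net_device *slave)
{
	const struct net_device_ops *ops = master->netdev_ops;

	if (!ops->ndo_add_slave)
		return -EOPNOTSUPP;

	return ops->ndo_add_slave(master, slave);
}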
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index d9d1e2b..dce8f00 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -148,6 +148,8 @@ static void del_nbp(struct net_bridge_port *p)
netdev_rx_handler_unregister(dev);
+ netdev_set_master(dev, NULL);
+
br_multicast_del_port(p);
kobject_uevent(&p->kobj, KOBJ_REMOVE);
@@ -365,7 +367,7 @@ int br_min_mtu(const struct net_bridge *br)
void br_features_recompute(struct net_bridge *br)
{
struct net_bridge_port *p;
- unsigned long features, mask;
+ u32 features, mask;
features = mask = br->feature_mask;
if (list_empty(&br->port_list))
@@ -379,7 +381,7 @@ void br_features_recompute(struct net_bridge *br)
}
done:
- br->dev->features = netdev_fix_features(features, NULL);
+ br->dev->features = netdev_fix_features(br->dev, features);
}
/* called with RTNL */
@@ -429,10 +431,14 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
goto err3;
- err = netdev_rx_handler_register(dev, br_handle_frame, p);
+ err = netdev_set_master(dev, br->dev);
if (err)
goto err3;
+ err = netdev_rx_handler_register(dev, br_handle_frame, p);
+ if (err)
+ goto err4;
+
dev->priv_flags |= IFF_BRIDGE_PORT;
dev_disable_lro(dev);
@@ -455,6 +461,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
+
+err4:
+ netdev_set_master(dev, NULL);
err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 4e1b620..f7afc36 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -182,7 +182,7 @@ struct net_bridge
struct br_cpu_netstats __percpu *stats;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
- unsigned long feature_mask;
+ u32 feature_mask;
#ifdef CONFIG_BRIDGE_NETFILTER
struct rtable fake_rtable;
bool nf_call_iptables;
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 50a46af..2ed0056 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -22,9 +22,15 @@
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip6.h>
-struct tcpudphdr {
- __be16 src;
- __be16 dst;
+union pkthdr {
+ struct {
+ __be16 src;
+ __be16 dst;
+ } tcpudphdr;
+ struct {
+ u8 type;
+ u8 code;
+ } icmphdr;
};
static bool
@@ -33,8 +39,8 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
const struct ebt_ip6_info *info = par->matchinfo;
const struct ipv6hdr *ih6;
struct ipv6hdr _ip6h;
- const struct tcpudphdr *pptr;
- struct tcpudphdr _ports;
+ const union pkthdr *pptr;
+ union pkthdr _pkthdr;
ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
if (ih6 == NULL)
@@ -56,26 +62,34 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
return false;
if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
return false;
- if (!(info->bitmask & EBT_IP6_DPORT) &&
- !(info->bitmask & EBT_IP6_SPORT))
+ if (!(info->bitmask & (EBT_IP6_DPORT |
+ EBT_IP6_SPORT | EBT_IP6_ICMP6)))
return true;
- pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports),
- &_ports);
+
+ /* min icmpv6 header size is 4, so sizeof(_pkthdr) is ok. */
+ pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
+ &_pkthdr);
if (pptr == NULL)
return false;
if (info->bitmask & EBT_IP6_DPORT) {
- u32 dst = ntohs(pptr->dst);
+ u16 dst = ntohs(pptr->tcpudphdr.dst);
if (FWINV(dst < info->dport[0] ||
dst > info->dport[1], EBT_IP6_DPORT))
return false;
}
if (info->bitmask & EBT_IP6_SPORT) {
- u32 src = ntohs(pptr->src);
+ u16 src = ntohs(pptr->tcpudphdr.src);
if (FWINV(src < info->sport[0] ||
src > info->sport[1], EBT_IP6_SPORT))
return false;
}
- return true;
+ if ((info->bitmask & EBT_IP6_ICMP6) &&
+ FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
+ pptr->icmphdr.type > info->icmpv6_type[1] ||
+ pptr->icmphdr.code < info->icmpv6_code[0] ||
+ pptr->icmphdr.code > info->icmpv6_code[1],
+ EBT_IP6_ICMP6))
+ return false;
}
return true;
}
@@ -103,6 +117,14 @@ static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
return -EINVAL;
if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
return -EINVAL;
+ if (info->bitmask & EBT_IP6_ICMP6) {
+ if ((info->invflags & EBT_IP6_PROTO) ||
+ info->protocol != IPPROTO_ICMPV6)
+ return -EINVAL;
+ if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
+ info->icmpv6_code[0] > info->icmpv6_code[1])
+ return -EINVAL;
+ }
return 0;
}
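The new EBT_IP6_ICMP6 match is two inclusive ranges, one over the ICMPv6 type and one over the code, each invertible via FWINV. A standalone sketch of the range test, checking an Echo Request (type 128, code 0) against a rule for types [128,129] and codes [0,0]:

#include <stdbool.h>
#include <stdint.h>

static bool icmp6_in_range(uint8_t type, uint8_t code,
			   const uint8_t type_rng[2],
			   const uint8_t code_rng[2])
{
	return type >= type_rng[0] && type <= type_rng[1] &&
	       code >= code_rng[0] && code <= code_rng[1];
}

/* icmp6_in_range(128, 0, (uint8_t[]){128, 129}, (uint8_t[]){0, 0})
 * returns true; with EBT_IP6_ICMP6 set in invflags the kernel match
 * negates the result, exactly as FWINV does above. */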
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 16df053..5f1825d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1764,6 +1764,7 @@ static int compat_table_info(const struct ebt_table_info *info,
newinfo->entries_size = size;
+ xt_compat_init_offsets(AF_INET, info->nentries);
return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
entries, newinfo);
}
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index c665de7..f1f98d9 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -23,10 +23,8 @@
#include <asm/atomic.h>
#define MAX_PHY_LAYERS 7
-#define PHY_NAME_LEN 20
#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
-#define RFM_FRAGMENT_SIZE 4030
/* Information about CAIF physical interfaces held by Config Module in order
* to manage physical interfaces
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index d3ed264..27dab26 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -18,7 +18,6 @@
#define DGM_CMD_BIT 0x80
#define DGM_FLOW_OFF 0x81
#define DGM_FLOW_ON 0x80
-#define DGM_CTRL_PKT_SIZE 1
#define DGM_MTU 1500
static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 9297f7d..8303fe3 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -25,7 +25,6 @@ struct cfserl {
spinlock_t sync;
bool usestx;
};
-#define STXLEN(layr) (layr->usestx ? 1 : 0)
static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index efad410..315c0d6 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -20,7 +20,7 @@
#define UTIL_REMOTE_SHUTDOWN 0x82
#define UTIL_FLOW_OFF 0x81
#define UTIL_FLOW_ON 0x80
-#define UTIL_CTRL_PKT_SIZE 1
+
static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 3b425b1..c3b1dec 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -17,7 +17,7 @@
#define VEI_FLOW_OFF 0x81
#define VEI_FLOW_ON 0x80
#define VEI_SET_PIN 0x82
-#define VEI_CTRL_PKT_SIZE 1
+
#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/core/dev.c b/net/core/dev.c
index 8ae6631..9d8bfd9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,7 @@
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
+#include <linux/cpu_rmap.h>
#include "net-sysfs.h"
@@ -1289,7 +1290,7 @@ static int __dev_close(struct net_device *dev)
return retval;
}
-int dev_close_many(struct list_head *head)
+static int dev_close_many(struct list_head *head)
{
struct net_device *dev, *tmp;
LIST_HEAD(tmp_list);
@@ -1597,6 +1598,48 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_unlock();
}
+/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+ * @dev: Network device
+ * @txq: number of queues available
+ *
+ * If real_num_tx_queues is changed the tc mappings may no longer be
+ * valid. To resolve this, verify that each tc mapping remains valid
+ * and, if not, NULL the mapping. With no priorities mapping to an
+ * offset/count pair, that pair is no longer used. In the worst case
+ * TC0 is invalid and nothing can be done, so disable priority
+ * mappings entirely. It is expected that drivers will fix this
+ * mapping, if they can, before calling netif_set_real_num_tx_queues.
+ */
+static void netif_setup_tc(struct net_device *dev, unsigned int txq)
+{
+ int i;
+ struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+
+ /* If TC0 is invalidated disable TC mapping */
+ if (tc->offset + tc->count > txq) {
+ pr_warning("Number of in use tx queues changed, "
+ "invalidating tc mappings. Priority "
+ "traffic classification disabled!\n");
+ dev->num_tc = 0;
+ return;
+ }
+
+ /* Invalidated prio to tc mappings set to TC0 */
+ for (i = 1; i < TC_BITMASK + 1; i++) {
+ int q = netdev_get_prio_tc_map(dev, i);
+
+ tc = &dev->tc_to_txq[q];
+ if (tc->offset + tc->count > txq) {
+ pr_warning("Number of in use tx queues "
+ "changed. Priority %i to tc "
+ "mapping %i is no longer valid; "
+ "setting map to 0\n",
+ i, q);
+ netdev_set_prio_tc_map(dev, i, 0);
+ }
+ }
+}
+
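A concrete example of what netif_setup_tc() guards against: two traffic classes laid out over eight queues, then real_num_tx_queues dropping to four. Illustrative values only:

/* u16 stands in for the kernel type; the values are made up. */
typedef unsigned short u16;

struct tc_txq { u16 count, offset; };

static struct tc_txq tc_to_txq[2] = {
	{ .offset = 0, .count = 4 },	/* TC0 -> queues 0..3 */
	{ .offset = 4, .count = 4 },	/* TC1 -> queues 4..7 */
};

/* After txq drops to 4: TC0 has offset + count == 4 <= 4 and stays
 * valid; TC1 has 4 + 4 > 4 and points past the available queues, so
 * every priority mapped to TC1 is reset to TC0 by the loop above. */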
/*
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
* greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
@@ -1608,7 +1651,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (txq < 1 || txq > dev->num_tx_queues)
return -EINVAL;
- if (dev->reg_state == NETREG_REGISTERED) {
+ if (dev->reg_state == NETREG_REGISTERED ||
+ dev->reg_state == NETREG_UNREGISTERING) {
ASSERT_RTNL();
rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
@@ -1616,6 +1660,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (rc)
return rc;
+ if (dev->num_tc)
+ netif_setup_tc(dev, txq);
+
if (txq < dev->real_num_tx_queues)
qdisc_reset_all_tx_gt(dev, txq);
}
@@ -1815,7 +1862,7 @@ EXPORT_SYMBOL(skb_checksum_help);
* It may return NULL if the skb requires no segmentation. This is
* only possible when GSO is used for verifying header integrity.
*/
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_type *ptype;
@@ -2003,7 +2050,7 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
protocol == htons(ETH_P_FCOE)));
}
-static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
{
if (!can_checksum_protocol(features, protocol)) {
features &= ~NETIF_F_ALL_CSUM;
@@ -2015,10 +2062,10 @@ static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features
return features;
}
-int netif_skb_features(struct sk_buff *skb)
+u32 netif_skb_features(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
- int features = skb->dev->features;
+ u32 features = skb->dev->features;
if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2063,7 +2110,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int rc = NETDEV_TX_OK;
if (likely(!skb->next)) {
- int features;
+ u32 features;
/*
* If device doesn't need skb->dst, release it right now while
@@ -2165,6 +2212,8 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
unsigned int num_tx_queues)
{
u32 hash;
+ u16 qoffset = 0;
+ u16 qcount = num_tx_queues;
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
@@ -2173,13 +2222,19 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
return hash;
}
+ if (dev->num_tc) {
+ u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+ qoffset = dev->tc_to_txq[tc].offset;
+ qcount = dev->tc_to_txq[tc].count;
+ }
+
if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
else
hash = (__force u16) skb->protocol ^ skb->rxhash;
hash = jhash_1word(hash, hashrnd);
- return (u16) (((u64) hash * num_tx_queues) >> 32);
+ return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
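The queue selection above maps a 32-bit hash into the class's queue range without a modulo: (u64)hash * qcount >> 32 lands in [0, qcount), and qoffset shifts it into the right slice. A self-contained version of the arithmetic:

#include <stdint.h>

/* Example: hash = 0xC0000000 (top quarter of the 32-bit space) with
 * qcount = 4 and qoffset = 4 gives (0xC0000000ULL * 4) >> 32 = 3,
 * so the packet is steered to queue 4 + 3 = 7. */
static uint16_t pick_queue(uint32_t hash, uint16_t qoffset, uint16_t qcount)
{
	return (uint16_t)(((uint64_t)hash * qcount) >> 32) + qoffset;
}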
@@ -2276,15 +2331,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct netdev_queue *txq)
{
spinlock_t *root_lock = qdisc_lock(q);
- bool contended = qdisc_is_running(q);
+ bool contended;
int rc;
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ qdisc_calculate_pkt_len(skb, q);
/*
* Heuristic to force contended enqueues to serialize on a
* separate lock before trying to get qdisc main lock.
* This permits __QDISC_STATE_RUNNING owner to get the lock more often
* and dequeue packets faster.
*/
+ contended = qdisc_is_running(q);
if (unlikely(contended))
spin_lock(&q->busylock);
@@ -2302,7 +2360,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
skb_dst_force(skb);
- qdisc_skb_cb(skb)->pkt_len = skb->len;
qdisc_bstats_update(q, skb);
if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
@@ -2317,7 +2374,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
rc = NET_XMIT_SUCCESS;
} else {
skb_dst_force(skb);
- rc = qdisc_enqueue_root(skb, q);
+ rc = q->enqueue(skb, q) & NET_XMIT_MASK;
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
@@ -2536,6 +2593,53 @@ EXPORT_SYMBOL(__skb_get_rxhash);
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
+static struct rps_dev_flow *
+set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ struct rps_dev_flow *rflow, u16 next_cpu)
+{
+ u16 tcpu;
+
+ tcpu = rflow->cpu = next_cpu;
+ if (tcpu != RPS_NO_CPU) {
+#ifdef CONFIG_RFS_ACCEL
+ struct netdev_rx_queue *rxqueue;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_dev_flow *old_rflow;
+ u32 flow_id;
+ u16 rxq_index;
+ int rc;
+
+ /* Should we steer this flow to a different hardware queue? */
+ if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap)
+ goto out;
+ rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
+ if (rxq_index == skb_get_rx_queue(skb))
+ goto out;
+
+ rxqueue = dev->_rx + rxq_index;
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ if (!flow_table)
+ goto out;
+ flow_id = skb->rxhash & flow_table->mask;
+ rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
+ rxq_index, flow_id);
+ if (rc < 0)
+ goto out;
+ old_rflow = rflow;
+ rflow = &flow_table->flows[flow_id];
+ rflow->cpu = next_cpu;
+ rflow->filter = rc;
+ if (old_rflow->filter == rflow->filter)
+ old_rflow->filter = RPS_NO_FILTER;
+ out:
+#endif
+ rflow->last_qtail =
+ per_cpu(softnet_data, tcpu).input_queue_head;
+ }
+
+ return rflow;
+}
+
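set_rps_cpu() relies on dev->rx_cpu_rmap, a reverse CPU-affinity map the driver builds from its RX interrupts; cpu_rmap_lookup_index() then names the queue whose IRQ affinity sits closest to the flow's target CPU. A hedged sketch of the driver-side setup with the cpu_rmap API added alongside this code (error handling elided; n_rx_queues and rxq_irq[] are assumed driver-private data):

#include <linux/cpu_rmap.h>

static int init_rx_cpu_rmap(struct net_device *dev,
			    unsigned int n_rx_queues, const int *rxq_irq)
{
	unsigned int i;

	dev->rx_cpu_rmap = alloc_irq_cpu_rmap(n_rx_queues);
	if (!dev->rx_cpu_rmap)
		return -ENOMEM;

	/* Tie each RX queue's IRQ into the map; the map tracks IRQ
	 * affinity changes and answers "nearest queue for this CPU". */
	for (i = 0; i < n_rx_queues; i++)
		irq_cpu_rmap_add(dev->rx_cpu_rmap, rxq_irq[i]);

	return 0;
}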
/*
* get_rps_cpu is called from netif_receive_skb and returns the target
* CPU from the RPS map of the receiving queue for a given skb.
@@ -2607,12 +2711,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
if (unlikely(tcpu != next_cpu) &&
(tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
- rflow->last_qtail)) >= 0)) {
- tcpu = rflow->cpu = next_cpu;
- if (tcpu != RPS_NO_CPU)
- rflow->last_qtail = per_cpu(softnet_data,
- tcpu).input_queue_head;
- }
+ rflow->last_qtail)) >= 0))
+ rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+
if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
@@ -2633,6 +2734,46 @@ done:
return cpu;
}
+#ifdef CONFIG_RFS_ACCEL
+
+/**
+ * rps_may_expire_flow - check whether an RFS hardware filter may be removed
+ * @dev: Device on which the filter was set
+ * @rxq_index: RX queue index
+ * @flow_id: Flow ID passed to ndo_rx_flow_steer()
+ * @filter_id: Filter ID returned by ndo_rx_flow_steer()
+ *
+ * Drivers that implement ndo_rx_flow_steer() should periodically call
+ * this function for each installed filter and remove the filters for
+ * which it returns %true.
+ */
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+ u32 flow_id, u16 filter_id)
+{
+ struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_dev_flow *rflow;
+ bool expire = true;
+ int cpu;
+
+ rcu_read_lock();
+ flow_table = rcu_dereference(rxqueue->rps_flow_table);
+ if (flow_table && flow_id <= flow_table->mask) {
+ rflow = &flow_table->flows[flow_id];
+ cpu = ACCESS_ONCE(rflow->cpu);
+ if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+ ((int)(per_cpu(softnet_data, cpu).input_queue_head -
+ rflow->last_qtail) <
+ (int)(10 * flow_table->mask)))
+ expire = false;
+ }
+ rcu_read_unlock();
+ return expire;
+}
+EXPORT_SYMBOL(rps_may_expire_flow);
+
+#endif /* CONFIG_RFS_ACCEL */
+
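As the kernel-doc above says, a driver that installs filters through ndo_rx_flow_steer() is expected to scan them periodically and remove the stale ones. A hedged sketch of such a scan, run e.g. from a delayed work item; struct my_filter and my_hw_remove_filter() are hypothetical driver bookkeeping:

struct my_filter {
	bool installed;
	u16 rxq_index;
	u32 flow_id;
};

static void my_rfs_expire(struct net_device *dev,
			  struct my_filter *flt, unsigned int n_filters)
{
	unsigned int i;

	for (i = 0; i < n_filters; i++) {
		if (!flt[i].installed)
			continue;
		/* filter_id is the value ndo_rx_flow_steer() returned,
		 * here simply the table index. */
		if (rps_may_expire_flow(dev, flt[i].rxq_index,
					flt[i].flow_id, i)) {
			my_hw_remove_filter(dev, &flt[i]);	/* hypothetical */
			flt[i].installed = false;
		}
	}
}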
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
@@ -2968,7 +3109,8 @@ static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
* duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
* ARP on active-backup slaves with arp_validate enabled.
*/
-int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
+static int __skb_bond_should_drop(struct sk_buff *skb,
+ struct net_device *master)
{
struct net_device *dev = skb->dev;
@@ -3002,14 +3144,12 @@ int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
}
return 0;
}
-EXPORT_SYMBOL(__skb_bond_should_drop);
static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
rx_handler_func_t *rx_handler;
struct net_device *orig_dev;
- struct net_device *master;
struct net_device *null_or_orig;
struct net_device *orig_or_bond;
int ret = NET_RX_DROP;
@@ -3036,15 +3176,19 @@ static int __netif_receive_skb(struct sk_buff *skb)
*/
null_or_orig = NULL;
orig_dev = skb->dev;
- master = ACCESS_ONCE(orig_dev->master);
if (skb->deliver_no_wcard)
null_or_orig = orig_dev;
- else if (master) {
- if (skb_bond_should_drop(skb, master)) {
- skb->deliver_no_wcard = 1;
- null_or_orig = orig_dev; /* deliver only exact match */
- } else
- skb->dev = master;
+ else if (netif_is_bond_slave(orig_dev)) {
+ struct net_device *bond_master = ACCESS_ONCE(orig_dev->master);
+
+ if (likely(bond_master)) {
+ if (__skb_bond_should_drop(skb, bond_master)) {
+ skb->deliver_no_wcard = 1;
+ /* deliver only exact match */
+ null_or_orig = orig_dev;
+ } else
+ skb->dev = bond_master;
+ }
}
__this_cpu_inc(softnet_data.processed);
@@ -3917,12 +4061,15 @@ void *dev_seq_start(struct seq_file *seq, loff_t *pos)
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net_device *dev = (v == SEQ_START_TOKEN) ?
- first_net_device(seq_file_net(seq)) :
- next_net_device((struct net_device *)v);
+ struct net_device *dev = v;
+
+ if (v == SEQ_START_TOKEN)
+ dev = first_net_device_rcu(seq_file_net(seq));
+ else
+ dev = next_net_device_rcu(dev);
++*pos;
- return rcu_dereference(dev);
+ return dev;
}
void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4206,15 +4353,14 @@ static int __init dev_proc_init(void)
/**
- * netdev_set_master - set up master/slave pair
+ * netdev_set_master - set up master pointer
* @slave: slave device
* @master: new master device
*
* Changes the master device of the slave. Pass %NULL to break the
* bonding. The caller must hold the RTNL semaphore. On a failure
* a negative errno code is returned. On success the reference counts
- * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
- * function returns zero.
+ * are adjusted and the function returns zero.
*/
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
@@ -4234,6 +4380,29 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
synchronize_net();
dev_put(old);
}
+ return 0;
+}
+EXPORT_SYMBOL(netdev_set_master);
+
+/**
+ * netdev_set_bond_master - set up bonding master/slave pair
+ * @slave: slave device
+ * @master: new master device
+ *
+ * Changes the master device of the slave. Pass %NULL to break the
+ * bonding. The caller must hold the RTNL semaphore. On a failure
+ * a negative errno code is returned. On success %RTM_NEWLINK is sent
+ * to the routing socket and the function returns zero.
+ */
+int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ err = netdev_set_master(slave, master);
+ if (err)
+ return err;
if (master)
slave->flags |= IFF_SLAVE;
else
@@ -4242,7 +4411,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
return 0;
}
-EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(netdev_set_bond_master);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
@@ -4579,6 +4748,17 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
EXPORT_SYMBOL(dev_set_mtu);
/**
+ * dev_set_group - Change group this device belongs to
+ * @dev: device
+ * @new_group: group this device should belong to
+ */
+void dev_set_group(struct net_device *dev, int new_group)
+{
+ dev->group = new_group;
+}
+EXPORT_SYMBOL(dev_set_group);
+
+/**
* dev_set_mac_address - Change Media Access Control Address
* @dev: device
* @sa: new address
@@ -5069,41 +5249,55 @@ static void rollback_registered(struct net_device *dev)
list_del(&single);
}
-unsigned long netdev_fix_features(unsigned long features, const char *name)
+u32 netdev_fix_features(struct net_device *dev, u32 features)
{
+ /* Fix illegal checksum combinations */
+ if ((features & NETIF_F_HW_CSUM) &&
+ (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ netdev_info(dev, "mixed HW and IP checksum settings.\n");
+ features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ }
+
+ if ((features & NETIF_F_NO_CSUM) &&
+ (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ netdev_info(dev, "mixed no checksumming and other settings.\n");
+ features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+ }
+
/* Fix illegal SG+CSUM combinations. */
if ((features & NETIF_F_SG) &&
!(features & NETIF_F_ALL_CSUM)) {
- if (name)
- printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
- "checksum feature.\n", name);
+ netdev_info(dev,
+ "Dropping NETIF_F_SG since no checksum feature.\n");
features &= ~NETIF_F_SG;
}
/* TSO requires that SG is present as well. */
if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
- if (name)
- printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
- "SG feature.\n", name);
+ netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
features &= ~NETIF_F_TSO;
}
+ /* Software GSO depends on SG. */
+ if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
+ netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
+ features &= ~NETIF_F_GSO;
+ }
+
+ /* UFO needs SG and checksumming */
if (features & NETIF_F_UFO) {
/* maybe split UFO into V4 and V6? */
if (!((features & NETIF_F_GEN_CSUM) ||
(features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- if (name)
- printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
- "since no checksum offload features.\n",
- name);
+ netdev_info(dev,
+ "Dropping NETIF_F_UFO since no checksum offload features.\n");
features &= ~NETIF_F_UFO;
}
if (!(features & NETIF_F_SG)) {
- if (name)
- printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
- "since no NETIF_F_SG feature.\n", name);
+ netdev_info(dev,
+ "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
features &= ~NETIF_F_UFO;
}
}
@@ -5112,6 +5306,37 @@ unsigned long netdev_fix_features(unsigned long features, const char *name)
}
EXPORT_SYMBOL(netdev_fix_features);
+void netdev_update_features(struct net_device *dev)
+{
+ u32 features;
+ int err = 0;
+
+ features = netdev_get_wanted_features(dev);
+
+ if (dev->netdev_ops->ndo_fix_features)
+ features = dev->netdev_ops->ndo_fix_features(dev, features);
+
+ /* driver might be less strict about feature dependencies */
+ features = netdev_fix_features(dev, features);
+
+ if (dev->features == features)
+ return;
+
+ netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
+ dev->features, features);
+
+ if (dev->netdev_ops->ndo_set_features)
+ err = dev->netdev_ops->ndo_set_features(dev, features);
+
+ if (!err)
+ dev->features = features;
+ else if (err < 0)
+ netdev_err(dev,
+ "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
+ err, features, dev->features);
+}
+EXPORT_SYMBOL(netdev_update_features);
+
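netdev_update_features() gives a driver two hooks: ndo_fix_features() to express dependencies between feature bits before anything is committed, and ndo_set_features() to push the accepted set to hardware. A minimal sketch of the pair for a hypothetical driver whose VLAN RX stripping depends on its RX checksum engine (mydrv_hw_set_rx_csum() is made up):

static u32 mydrv_fix_features(struct net_device *dev, u32 features)
{
	/* Hardware cannot strip VLAN tags with RX checksumming off. */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_HW_VLAN_RX;
	return features;
}

static int mydrv_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		mydrv_hw_set_rx_csum(dev, !!(features & NETIF_F_RXCSUM));
	return 0;
}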
/**
* netif_stacked_transfer_operstate - transfer operstate
* @rootdev: the root or lower level device to transfer state from
@@ -5246,26 +5471,18 @@ int register_netdevice(struct net_device *dev)
if (dev->iflink == -1)
dev->iflink = dev->ifindex;
- /* Fix illegal checksum combinations */
- if ((dev->features & NETIF_F_HW_CSUM) &&
- (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
- dev->name);
- dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
- }
-
- if ((dev->features & NETIF_F_NO_CSUM) &&
- (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
- printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
- dev->name);
- dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
- }
+ /* Transfer changeable features to wanted_features and enable
+ * software offloads (GSO and GRO).
+ */
+ dev->hw_features |= NETIF_F_SOFT_FEATURES;
+ dev->wanted_features = (dev->features & dev->hw_features)
+ | NETIF_F_SOFT_FEATURES;
- dev->features = netdev_fix_features(dev->features, dev->name);
+ /* Avoid warning from netdev_fix_features() for GSO without SG */
+ if (!(dev->wanted_features & NETIF_F_SG))
+ dev->wanted_features &= ~NETIF_F_GSO;
- /* Enable software GSO if SG is supported. */
- if (dev->features & NETIF_F_SG)
- dev->features |= NETIF_F_GSO;
+ netdev_update_features(dev);
/* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
* vlan_dev_init() will do the dev->features check, so these features
@@ -5687,6 +5904,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
#endif
strcpy(dev->name, name);
+ dev->group = INIT_NETDEV_GROUP;
return dev;
free_all:
@@ -6001,8 +6219,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
* @one to the master device with current feature set @all. Will not
* enable anything that is off in @mask. Returns the new feature set.
*/
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
- unsigned long mask)
+u32 netdev_increment_features(u32 all, u32 one, u32 mask)
{
/* If device needs checksumming, downgrade to it. */
if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
diff --git a/net/core/dst.c b/net/core/dst.c
index b99c7c7..91104d3 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -164,7 +164,9 @@ int dst_discard(struct sk_buff *skb)
}
EXPORT_SYMBOL(dst_discard);
-void *dst_alloc(struct dst_ops *ops)
+const u32 dst_default_metrics[RTAX_MAX];
+
+void *dst_alloc(struct dst_ops *ops, int initial_ref)
{
struct dst_entry *dst;
@@ -175,11 +177,12 @@ void *dst_alloc(struct dst_ops *ops)
dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
if (!dst)
return NULL;
- atomic_set(&dst->__refcnt, 0);
+ atomic_set(&dst->__refcnt, initial_ref);
dst->ops = ops;
dst->lastuse = jiffies;
dst->path = dst;
dst->input = dst->output = dst_discard;
+ dst_init_metrics(dst, dst_default_metrics, true);
#if RT_CACHE_DEBUG >= 2
atomic_inc(&dst_total);
#endif
@@ -282,6 +285,42 @@ void dst_release(struct dst_entry *dst)
}
EXPORT_SYMBOL(dst_release);
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+ u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+
+ if (p) {
+ u32 *old_p = __DST_METRICS_PTR(old);
+ unsigned long prev, new;
+
+ memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+ new = (unsigned long) p;
+ prev = cmpxchg(&dst->_metrics, old, new);
+
+ if (prev != old) {
+ kfree(p);
+ p = __DST_METRICS_PTR(prev);
+ if (prev & DST_METRICS_READ_ONLY)
+ p = NULL;
+ }
+ }
+ return p;
+}
+EXPORT_SYMBOL(dst_cow_metrics_generic);
+
+/* Caller asserts that dst_metrics_read_only(dst) is false. */
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+ unsigned long prev, new;
+
+ new = (unsigned long) dst_default_metrics;
+ prev = cmpxchg(&dst->_metrics, old, new);
+ if (prev == old)
+ kfree(__DST_METRICS_PTR(old));
+}
+EXPORT_SYMBOL(__dst_destroy_metrics_generic);
+
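dst_cow_metrics_generic() is a lock-free copy-on-write: duplicate the read-only metrics array, then publish the copy with a single cmpxchg on dst->_metrics; if another CPU won the race, free the local copy and adopt the winner's. The same idiom in isolation, as a userspace C11 sketch with a plain atomic pointer instead of the tagged _metrics word:

#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

static _Atomic(unsigned int *) metrics;

static unsigned int *cow_metrics(unsigned int *ro, size_t n)
{
	unsigned int *expected = ro;
	unsigned int *copy = malloc(n * sizeof(*copy));

	if (!copy)
		return NULL;
	memcpy(copy, ro, n * sizeof(*copy));

	/* Single winner: if someone else already published a writable
	 * copy, discard ours and use theirs instead. */
	if (!atomic_compare_exchange_strong(&metrics, &expected, copy)) {
		free(copy);
		return expected;	/* filled in by the failed CAS */
	}
	return copy;
}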
/**
* skb_dst_set_noref - sets skb dst, without a reference
* @skb: buffer
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index ff23029..66cdc76 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -34,12 +34,6 @@ u32 ethtool_op_get_link(struct net_device *dev)
}
EXPORT_SYMBOL(ethtool_op_get_link);
-u32 ethtool_op_get_rx_csum(struct net_device *dev)
-{
- return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_rx_csum);
-
u32 ethtool_op_get_tx_csum(struct net_device *dev)
{
return (dev->features & NETIF_F_ALL_CSUM) != 0;
@@ -55,6 +49,7 @@ int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
return 0;
}
+EXPORT_SYMBOL(ethtool_op_set_tx_csum);
int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
{
@@ -171,6 +166,306 @@ EXPORT_SYMBOL(ethtool_ntuple_flush);
/* Handlers for each ethtool command */
+#define ETHTOOL_DEV_FEATURE_WORDS 1
+
+static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_gfeatures cmd = {
+ .cmd = ETHTOOL_GFEATURES,
+ .size = ETHTOOL_DEV_FEATURE_WORDS,
+ };
+ struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
+ {
+ .available = dev->hw_features,
+ .requested = dev->wanted_features,
+ .active = dev->features,
+ .never_changed = NETIF_F_NEVER_CHANGE,
+ },
+ };
+ u32 __user *sizeaddr;
+ u32 copy_size;
+
+ sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
+ if (get_user(copy_size, sizeaddr))
+ return -EFAULT;
+
+ if (copy_size > ETHTOOL_DEV_FEATURE_WORDS)
+ copy_size = ETHTOOL_DEV_FEATURE_WORDS;
+
+ if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+ return -EFAULT;
+ useraddr += sizeof(cmd);
+ if (copy_to_user(useraddr, features, copy_size * sizeof(*features)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_sfeatures cmd;
+ struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
+ int ret = 0;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+ useraddr += sizeof(cmd);
+
+ if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS)
+ return -EINVAL;
+
+ if (copy_from_user(features, useraddr, sizeof(features)))
+ return -EFAULT;
+
+ if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
+ return -EINVAL;
+
+ if (features[0].valid & ~dev->hw_features) {
+ features[0].valid &= dev->hw_features;
+ ret |= ETHTOOL_F_UNSUPPORTED;
+ }
+
+ dev->wanted_features &= ~features[0].valid;
+ dev->wanted_features |= features[0].valid & features[0].requested;
+ netdev_update_features(dev);
+
+ if ((dev->wanted_features ^ dev->features) & features[0].valid)
+ ret |= ETHTOOL_F_WISH;
+
+ return ret;
+}
+
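From userspace the new multi-word interface goes through the usual SIOCETHTOOL ioctl. A hedged sketch of querying ETHTOOL_GFEATURES for the single 32-bit word this series defines (struct layouts as introduced by the patch; "eth0" and the missing error handling are illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct {
		struct ethtool_gfeatures cmd;
		struct ethtool_get_features_block block[1];
	} gf = { .cmd = { .cmd = ETHTOOL_GFEATURES, .size = 1 } };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, "eth0");
	ifr.ifr_data = (void *)&gf;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("active features: 0x%08x\n", gf.block[0].active);
	return 0;
}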
+static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
+ /* NETIF_F_SG */ "tx-scatter-gather",
+ /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
+ /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
+ /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
+ /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6",
+ /* NETIF_F_HIGHDMA */ "highdma",
+ /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
+ /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
+
+ /* NETIF_F_HW_VLAN_RX */ "rx-vlan-hw-parse",
+ /* NETIF_F_HW_VLAN_FILTER */ "rx-vlan-filter",
+ /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
+ /* NETIF_F_GSO */ "tx-generic-segmentation",
+ /* NETIF_F_LLTX */ "tx-lockless",
+ /* NETIF_F_NETNS_LOCAL */ "netns-local",
+ /* NETIF_F_GRO */ "rx-gro",
+ /* NETIF_F_LRO */ "rx-lro",
+
+ /* NETIF_F_TSO */ "tx-tcp-segmentation",
+ /* NETIF_F_UFO */ "tx-udp-fragmentation",
+ /* NETIF_F_GSO_ROBUST */ "tx-gso-robust",
+ /* NETIF_F_TSO_ECN */ "tx-tcp-ecn-segmentation",
+ /* NETIF_F_TSO6 */ "tx-tcp6-segmentation",
+ /* NETIF_F_FSO */ "tx-fcoe-segmentation",
+ "",
+ "",
+
+ /* NETIF_F_FCOE_CRC */ "tx-checksum-fcoe-crc",
+ /* NETIF_F_SCTP_CSUM */ "tx-checksum-sctp",
+ /* NETIF_F_FCOE_MTU */ "fcoe-mtu",
+ /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
+ /* NETIF_F_RXHASH */ "rx-hashing",
+ /* NETIF_F_RXCSUM */ "rx-checksum",
+ "",
+ "",
+};
+
+static int __ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (sset == ETH_SS_FEATURES)
+ return ARRAY_SIZE(netdev_features_strings);
+
+ if (ops && ops->get_sset_count && ops->get_strings)
+ return ops->get_sset_count(dev, sset);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void __ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (stringset == ETH_SS_FEATURES)
+ memcpy(data, netdev_features_strings,
+ sizeof(netdev_features_strings));
+ else
+ /* ops->get_strings is valid because it was checked earlier */
+ ops->get_strings(dev, stringset, data);
+}
+
+static u32 ethtool_get_feature_mask(u32 eth_cmd)
+{
+ /* feature masks of legacy discrete ethtool ops */
+
+ switch (eth_cmd) {
+ case ETHTOOL_GTXCSUM:
+ case ETHTOOL_STXCSUM:
+ return NETIF_F_ALL_CSUM | NETIF_F_SCTP_CSUM;
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_SRXCSUM:
+ return NETIF_F_RXCSUM;
+ case ETHTOOL_GSG:
+ case ETHTOOL_SSG:
+ return NETIF_F_SG;
+ case ETHTOOL_GTSO:
+ case ETHTOOL_STSO:
+ return NETIF_F_ALL_TSO;
+ case ETHTOOL_GUFO:
+ case ETHTOOL_SUFO:
+ return NETIF_F_UFO;
+ case ETHTOOL_GGSO:
+ case ETHTOOL_SGSO:
+ return NETIF_F_GSO;
+ case ETHTOOL_GGRO:
+ case ETHTOOL_SGRO:
+ return NETIF_F_GRO;
+ default:
+ BUG();
+ }
+}
+
+static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
+{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops)
+ return NULL;
+
+ switch (ethcmd) {
+ case ETHTOOL_GTXCSUM:
+ return ops->get_tx_csum;
+ case ETHTOOL_GRXCSUM:
+ return ops->get_rx_csum;
+ case ETHTOOL_GSG:
+ return ops->get_sg;
+ case ETHTOOL_GTSO:
+ return ops->get_tso;
+ case ETHTOOL_GUFO:
+ return ops->get_ufo;
+ default:
+ return NULL;
+ }
+}
+
+static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
+{
+ return !!(dev->features & NETIF_F_ALL_CSUM);
+}
+
+static int ethtool_get_one_feature(struct net_device *dev,
+ char __user *useraddr, u32 ethcmd)
+{
+ u32 mask = ethtool_get_feature_mask(ethcmd);
+ struct ethtool_value edata = {
+ .cmd = ethcmd,
+ .data = !!(dev->features & mask),
+ };
+
+ /* compatibility with discrete get_ ops */
+ if (!(dev->hw_features & mask)) {
+ u32 (*actor)(struct net_device *);
+
+ actor = __ethtool_get_one_feature_actor(dev, ethcmd);
+
+ /* bug compatibility with old get_rx_csum */
+ if (ethcmd == ETHTOOL_GRXCSUM && !actor)
+ actor = __ethtool_get_rx_csum_oldbug;
+
+ if (actor)
+ edata.data = actor(dev);
+ }
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_sg(struct net_device *dev, u32 data);
+static int __ethtool_set_tso(struct net_device *dev, u32 data);
+static int __ethtool_set_ufo(struct net_device *dev, u32 data);
+
+static int ethtool_set_one_feature(struct net_device *dev,
+ void __user *useraddr, u32 ethcmd)
+{
+ struct ethtool_value edata;
+ u32 mask;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ mask = ethtool_get_feature_mask(ethcmd);
+ mask &= dev->hw_features;
+ if (mask) {
+ if (edata.data)
+ dev->wanted_features |= mask;
+ else
+ dev->wanted_features &= ~mask;
+
+ netdev_update_features(dev);
+ return 0;
+ }
+
+ /* Driver is not converted to ndo_fix_features or does not
+ * support changing this offload. In the latter case it won't
+ * have the corresponding ethtool_ops field set.
+ *
+ * Following part is to be removed after all drivers advertise
+ * their changeable features in netdev->hw_features and stop
+ * using discrete offload setting ops.
+ */
+
+ switch (ethcmd) {
+ case ETHTOOL_STXCSUM:
+ return __ethtool_set_tx_csum(dev, edata.data);
+ case ETHTOOL_SRXCSUM:
+ return __ethtool_set_rx_csum(dev, edata.data);
+ case ETHTOOL_SSG:
+ return __ethtool_set_sg(dev, edata.data);
+ case ETHTOOL_STSO:
+ return __ethtool_set_tso(dev, edata.data);
+ case ETHTOOL_SUFO:
+ return __ethtool_set_ufo(dev, edata.data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
+{
+ u32 changed;
+
+ if (data & ~flags_dup_features)
+ return -EINVAL;
+
+ /* legacy set_flags() op */
+ if (dev->ethtool_ops->set_flags) {
+ if (unlikely(dev->hw_features & flags_dup_features))
+ netdev_warn(dev,
+ "driver BUG: mixed hw_features and set_flags()\n");
+ return dev->ethtool_ops->set_flags(dev, data);
+ }
+
+ /* allow changing only bits set in hw_features */
+ changed = (data ^ dev->wanted_features) & flags_dup_features;
+ if (changed & ~dev->hw_features)
+ return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
+
+ dev->wanted_features =
+ (dev->wanted_features & ~changed) | data;
+
+ netdev_update_features(dev);
+
+ return 0;
+}
+
static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
{
struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
@@ -251,14 +546,10 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
void __user *useraddr)
{
struct ethtool_sset_info info;
- const struct ethtool_ops *ops = dev->ethtool_ops;
u64 sset_mask;
int i, idx = 0, n_bits = 0, ret, rc;
u32 *info_buf = NULL;
- if (!ops->get_sset_count)
- return -EOPNOTSUPP;
-
if (copy_from_user(&info, useraddr, sizeof(info)))
return -EFAULT;
@@ -285,7 +576,7 @@ static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
if (!(sset_mask & (1ULL << i)))
continue;
- rc = ops->get_sset_count(dev, i);
+ rc = __ethtool_get_sset_count(dev, i);
if (rc >= 0) {
info.sset_mask |= (1ULL << i);
info_buf[idx++] = rc;
@@ -1091,6 +1382,9 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data)
{
int err;
+ if (data && !(dev->features & NETIF_F_ALL_CSUM))
+ return -EINVAL;
+
if (!data && dev->ethtool_ops->set_tso) {
err = dev->ethtool_ops->set_tso(dev, 0);
if (err)
@@ -1105,145 +1399,55 @@ static int __ethtool_set_sg(struct net_device *dev, u32 data)
return dev->ethtool_ops->set_sg(dev, data);
}
-static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
{
- struct ethtool_value edata;
int err;
if (!dev->ethtool_ops->set_tx_csum)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (!edata.data && dev->ethtool_ops->set_sg) {
+ if (!data && dev->ethtool_ops->set_sg) {
err = __ethtool_set_sg(dev, 0);
if (err)
return err;
}
- return dev->ethtool_ops->set_tx_csum(dev, edata.data);
+ return dev->ethtool_ops->set_tx_csum(dev, data);
}
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
-static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
{
- struct ethtool_value edata;
-
if (!dev->ethtool_ops->set_rx_csum)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (!edata.data && dev->ethtool_ops->set_sg)
+ if (!data)
dev->features &= ~NETIF_F_GRO;
- return dev->ethtool_ops->set_rx_csum(dev, edata.data);
+ return dev->ethtool_ops->set_rx_csum(dev, data);
}
-static int ethtool_set_sg(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tso(struct net_device *dev, u32 data)
{
- struct ethtool_value edata;
-
- if (!dev->ethtool_ops->set_sg)
- return -EOPNOTSUPP;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (edata.data &&
- !(dev->features & NETIF_F_ALL_CSUM))
- return -EINVAL;
-
- return __ethtool_set_sg(dev, edata.data);
-}
-
-static int ethtool_set_tso(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata;
-
if (!dev->ethtool_ops->set_tso)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (edata.data && !(dev->features & NETIF_F_SG))
+ if (data && !(dev->features & NETIF_F_SG))
return -EINVAL;
- return dev->ethtool_ops->set_tso(dev, edata.data);
+ return dev->ethtool_ops->set_tso(dev, data);
}
-static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_ufo(struct net_device *dev, u32 data)
{
- struct ethtool_value edata;
-
if (!dev->ethtool_ops->set_ufo)
return -EOPNOTSUPP;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
- if (edata.data && !(dev->features & NETIF_F_SG))
+ if (data && !(dev->features & NETIF_F_SG))
return -EINVAL;
- if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
+ if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
(dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
return -EINVAL;
- return dev->ethtool_ops->set_ufo(dev, edata.data);
-}
-
-static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GGSO };
-
- edata.data = dev->features & NETIF_F_GSO;
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
- if (edata.data)
- dev->features |= NETIF_F_GSO;
- else
- dev->features &= ~NETIF_F_GSO;
- return 0;
-}
-
-static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata = { ETHTOOL_GGRO };
-
- edata.data = dev->features & NETIF_F_GRO;
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
-}
-
-static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
-{
- struct ethtool_value edata;
-
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
-
- if (edata.data) {
- u32 rxcsum = dev->ethtool_ops->get_rx_csum ?
- dev->ethtool_ops->get_rx_csum(dev) :
- ethtool_op_get_rx_csum(dev);
-
- if (!rxcsum)
- return -EINVAL;
- dev->features |= NETIF_F_GRO;
- } else
- dev->features &= ~NETIF_F_GRO;
-
- return 0;
+ return dev->ethtool_ops->set_ufo(dev, data);
}
static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
@@ -1287,17 +1491,13 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
{
struct ethtool_gstrings gstrings;
- const struct ethtool_ops *ops = dev->ethtool_ops;
u8 *data;
int ret;
- if (!ops->get_strings || !ops->get_sset_count)
- return -EOPNOTSUPP;
-
if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
return -EFAULT;
- ret = ops->get_sset_count(dev, gstrings.string_set);
+ ret = __ethtool_get_sset_count(dev, gstrings.string_set);
if (ret < 0)
return ret;
@@ -1307,7 +1507,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
if (!data)
return -ENOMEM;
- ops->get_strings(dev, gstrings.string_set, data);
+ __ethtool_get_strings(dev, gstrings.string_set, data);
ret = -EFAULT;
if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1317,7 +1517,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
goto out;
ret = 0;
- out:
+out:
kfree(data);
return ret;
}
@@ -1458,7 +1658,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
void __user *useraddr = ifr->ifr_data;
u32 ethcmd;
int rc;
- unsigned long old_features;
+ u32 old_features;
if (!dev || !netif_device_present(dev))
return -ENODEV;
@@ -1500,6 +1700,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
+ case ETHTOOL_GFEATURES:
break;
default:
if (!capable(CAP_NET_ADMIN))
@@ -1570,42 +1771,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_SPAUSEPARAM:
rc = ethtool_set_pauseparam(dev, useraddr);
break;
- case ETHTOOL_GRXCSUM:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_rx_csum ?
- dev->ethtool_ops->get_rx_csum :
- ethtool_op_get_rx_csum));
- break;
- case ETHTOOL_SRXCSUM:
- rc = ethtool_set_rx_csum(dev, useraddr);
- break;
- case ETHTOOL_GTXCSUM:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_tx_csum ?
- dev->ethtool_ops->get_tx_csum :
- ethtool_op_get_tx_csum));
- break;
- case ETHTOOL_STXCSUM:
- rc = ethtool_set_tx_csum(dev, useraddr);
- break;
- case ETHTOOL_GSG:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_sg ?
- dev->ethtool_ops->get_sg :
- ethtool_op_get_sg));
- break;
- case ETHTOOL_SSG:
- rc = ethtool_set_sg(dev, useraddr);
- break;
- case ETHTOOL_GTSO:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_tso ?
- dev->ethtool_ops->get_tso :
- ethtool_op_get_tso));
- break;
- case ETHTOOL_STSO:
- rc = ethtool_set_tso(dev, useraddr);
- break;
case ETHTOOL_TEST:
rc = ethtool_self_test(dev, useraddr);
break;
@@ -1621,21 +1786,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GPERMADDR:
rc = ethtool_get_perm_addr(dev, useraddr);
break;
- case ETHTOOL_GUFO:
- rc = ethtool_get_value(dev, useraddr, ethcmd,
- (dev->ethtool_ops->get_ufo ?
- dev->ethtool_ops->get_ufo :
- ethtool_op_get_ufo));
- break;
- case ETHTOOL_SUFO:
- rc = ethtool_set_ufo(dev, useraddr);
- break;
- case ETHTOOL_GGSO:
- rc = ethtool_get_gso(dev, useraddr);
- break;
- case ETHTOOL_SGSO:
- rc = ethtool_set_gso(dev, useraddr);
- break;
case ETHTOOL_GFLAGS:
rc = ethtool_get_value(dev, useraddr, ethcmd,
(dev->ethtool_ops->get_flags ?
@@ -1643,8 +1793,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
ethtool_op_get_flags));
break;
case ETHTOOL_SFLAGS:
- rc = ethtool_set_value(dev, useraddr,
- dev->ethtool_ops->set_flags);
+ rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
break;
case ETHTOOL_GPFLAGS:
rc = ethtool_get_value(dev, useraddr, ethcmd,
@@ -1666,12 +1815,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_SRXCLSRLINS:
rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
break;
- case ETHTOOL_GGRO:
- rc = ethtool_get_gro(dev, useraddr);
- break;
- case ETHTOOL_SGRO:
- rc = ethtool_set_gro(dev, useraddr);
- break;
case ETHTOOL_FLASHDEV:
rc = ethtool_flash_device(dev, useraddr);
break;
@@ -1693,6 +1836,30 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_SRXFHINDIR:
rc = ethtool_set_rxfh_indir(dev, useraddr);
break;
+ case ETHTOOL_GFEATURES:
+ rc = ethtool_get_features(dev, useraddr);
+ break;
+ case ETHTOOL_SFEATURES:
+ rc = ethtool_set_features(dev, useraddr);
+ break;
+ case ETHTOOL_GTXCSUM:
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GSG:
+ case ETHTOOL_GTSO:
+ case ETHTOOL_GUFO:
+ case ETHTOOL_GGSO:
+ case ETHTOOL_GGRO:
+ rc = ethtool_get_one_feature(dev, useraddr, ethcmd);
+ break;
+ case ETHTOOL_STXCSUM:
+ case ETHTOOL_SRXCSUM:
+ case ETHTOOL_SSG:
+ case ETHTOOL_STSO:
+ case ETHTOOL_SUFO:
+ case ETHTOOL_SGSO:
+ case ETHTOOL_SGRO:
+ rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
+ break;
default:
rc = -EOPNOTSUPP;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index afc5837..232b187 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -142,14 +142,14 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
if (err)
return err;
- rcu_read_lock_bh();
- filter = rcu_dereference_bh(sk->sk_filter);
+ rcu_read_lock();
+ filter = rcu_dereference(sk->sk_filter);
if (filter) {
unsigned int pkt_len = sk_run_filter(skb, filter->insns);
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return err;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 60a9029..799f06e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -316,7 +316,7 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
{
size_t size = entries * sizeof(struct neighbour *);
struct neigh_hash_table *ret;
- struct neighbour **buckets;
+ struct neighbour __rcu **buckets;
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
@@ -324,14 +324,14 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
if (size <= PAGE_SIZE)
buckets = kzalloc(size, GFP_ATOMIC);
else
- buckets = (struct neighbour **)
+ buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
if (!buckets) {
kfree(ret);
return NULL;
}
- rcu_assign_pointer(ret->hash_buckets, buckets);
+ ret->hash_buckets = buckets;
ret->hash_mask = entries - 1;
get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
return ret;
@@ -343,7 +343,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
struct neigh_hash_table,
rcu);
size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
- struct neighbour **buckets = nht->hash_buckets;
+ struct neighbour __rcu **buckets = nht->hash_buckets;
if (size <= PAGE_SIZE)
kfree(buckets);
@@ -1540,7 +1540,7 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
panic("cannot create neighbour proc dir entry");
#endif
- tbl->nht = neigh_hash_alloc(8);
+ RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(8));
phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
@@ -1602,7 +1602,8 @@ int neigh_table_clear(struct neigh_table *tbl)
}
write_unlock(&neigh_tbl_lock);
- call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
+ call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
+ neigh_hash_free_rcu);
tbl->nht = NULL;
kfree(tbl->phash_buckets);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e23c01b..5ceb257 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -99,7 +99,7 @@ NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(features, fmt_long_hex);
+NETDEVICE_SHOW(features, fmt_hex);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);
@@ -295,6 +295,20 @@ static ssize_t show_ifalias(struct device *dev,
return ret;
}
+NETDEVICE_SHOW(group, fmt_dec);
+
+static int change_group(struct net_device *net, unsigned long new_group)
+{
+ dev_set_group(net, (int) new_group);
+ return 0;
+}
+
+static ssize_t store_group(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_group);
+}
+
static struct device_attribute net_class_attributes[] = {
__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
@@ -316,6 +330,7 @@ static struct device_attribute net_class_attributes[] = {
__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
store_tx_queue_len),
+ __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
{}
};
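
The new netdev_group attribute exposes dev->group read-write under /sys/class/net/<iface>/. A small sketch reading it back, with "eth0" again an assumed interface name:

/* Sketch: read the netdev_group attribute added above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/netdev_group", "r");
	int group;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &group) == 1)
		printf("eth0 is in group %d\n", group);
	fclose(f);
	return 0;
}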
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4..d73b77a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -251,6 +251,7 @@ struct pktgen_dev {
int max_pkt_size; /* = ETH_ZLEN; */
int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
int nfrags;
+ struct page *page;
u64 delay; /* nano-seconds */
__u64 count; /* Default No packets to send */
@@ -1134,6 +1135,10 @@ static ssize_t pktgen_if_write(struct file *file,
if (node_possible(value)) {
pkt_dev->node = value;
sprintf(pg_result, "OK: node=%d", pkt_dev->node);
+ if (pkt_dev->page) {
+ put_page(pkt_dev->page);
+ pkt_dev->page = NULL;
+ }
}
else
sprintf(pg_result, "ERROR: node not possible");
@@ -2605,6 +2610,90 @@ static inline __be16 build_tci(unsigned int id, unsigned int cfi,
return htons(id | (cfi << 12) | (prio << 13));
}
+static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
+ int datalen)
+{
+ struct timeval timestamp;
+ struct pktgen_hdr *pgh;
+
+ pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh));
+ datalen -= sizeof(*pgh);
+
+ if (pkt_dev->nfrags <= 0) {
+ memset(skb_put(skb, datalen), 0, datalen);
+ } else {
+ int frags = pkt_dev->nfrags;
+ int i, len;
+
+ if (frags > MAX_SKB_FRAGS)
+ frags = MAX_SKB_FRAGS;
+ len = datalen - frags * PAGE_SIZE;
+ if (len > 0) {
+ memset(skb_put(skb, len), 0, len);
+ datalen = frags * PAGE_SIZE;
+ }
+
+ i = 0;
+ while (datalen > 0) {
+ if (unlikely(!pkt_dev->page)) {
+ int node = numa_node_id();
+
+ if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
+ node = pkt_dev->node;
+ pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!pkt_dev->page)
+ break;
+ }
+ skb_shinfo(skb)->frags[i].page = pkt_dev->page;
+ get_page(pkt_dev->page);
+ skb_shinfo(skb)->frags[i].page_offset = 0;
+ skb_shinfo(skb)->frags[i].size =
+ (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+ datalen -= skb_shinfo(skb)->frags[i].size;
+ skb->len += skb_shinfo(skb)->frags[i].size;
+ skb->data_len += skb_shinfo(skb)->frags[i].size;
+ i++;
+ skb_shinfo(skb)->nr_frags = i;
+ }
+
+ while (i < frags) {
+ int rem;
+
+ if (i == 0)
+ break;
+
+ rem = skb_shinfo(skb)->frags[i - 1].size / 2;
+ if (rem == 0)
+ break;
+
+ skb_shinfo(skb)->frags[i - 1].size -= rem;
+
+ skb_shinfo(skb)->frags[i] =
+ skb_shinfo(skb)->frags[i - 1];
+ get_page(skb_shinfo(skb)->frags[i].page);
+ skb_shinfo(skb)->frags[i].page =
+ skb_shinfo(skb)->frags[i - 1].page;
+ skb_shinfo(skb)->frags[i].page_offset +=
+ skb_shinfo(skb)->frags[i - 1].size;
+ skb_shinfo(skb)->frags[i].size = rem;
+ i++;
+ skb_shinfo(skb)->nr_frags = i;
+ }
+ }
+
+ /* Stamp the time, and sequence number,
+ * convert them to network byte order
+ */
+ pgh->pgh_magic = htonl(PKTGEN_MAGIC);
+ pgh->seq_num = htonl(pkt_dev->seq_num);
+
+ do_gettimeofday(&timestamp);
+ pgh->tv_sec = htonl(timestamp.tv_sec);
+ pgh->tv_usec = htonl(timestamp.tv_usec);
+}
+
static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
struct pktgen_dev *pkt_dev)
{
@@ -2613,7 +2702,6 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
struct udphdr *udph;
int datalen, iplen;
struct iphdr *iph;
- struct pktgen_hdr *pgh = NULL;
__be16 protocol = htons(ETH_P_IP);
__be32 *mpls;
__be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */
@@ -2729,76 +2817,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
pkt_dev->pkt_overhead);
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
-
- if (pkt_dev->nfrags <= 0) {
- pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
- memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
- } else {
- int frags = pkt_dev->nfrags;
- int i, len;
-
- pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
- if (frags > MAX_SKB_FRAGS)
- frags = MAX_SKB_FRAGS;
- if (datalen > frags * PAGE_SIZE) {
- len = datalen - frags * PAGE_SIZE;
- memset(skb_put(skb, len), 0, len);
- datalen = frags * PAGE_SIZE;
- }
-
- i = 0;
- while (datalen > 0) {
- struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
- skb_shinfo(skb)->frags[i].page = page;
- skb_shinfo(skb)->frags[i].page_offset = 0;
- skb_shinfo(skb)->frags[i].size =
- (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
- datalen -= skb_shinfo(skb)->frags[i].size;
- skb->len += skb_shinfo(skb)->frags[i].size;
- skb->data_len += skb_shinfo(skb)->frags[i].size;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
-
- while (i < frags) {
- int rem;
-
- if (i == 0)
- break;
-
- rem = skb_shinfo(skb)->frags[i - 1].size / 2;
- if (rem == 0)
- break;
-
- skb_shinfo(skb)->frags[i - 1].size -= rem;
-
- skb_shinfo(skb)->frags[i] =
- skb_shinfo(skb)->frags[i - 1];
- get_page(skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->frags[i].page =
- skb_shinfo(skb)->frags[i - 1].page;
- skb_shinfo(skb)->frags[i].page_offset +=
- skb_shinfo(skb)->frags[i - 1].size;
- skb_shinfo(skb)->frags[i].size = rem;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
- }
-
- /* Stamp the time, and sequence number,
- * convert them to network byte order
- */
- if (pgh) {
- struct timeval timestamp;
-
- pgh->pgh_magic = htonl(PKTGEN_MAGIC);
- pgh->seq_num = htonl(pkt_dev->seq_num);
-
- do_gettimeofday(&timestamp);
- pgh->tv_sec = htonl(timestamp.tv_sec);
- pgh->tv_usec = htonl(timestamp.tv_usec);
- }
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
#ifdef CONFIG_XFRM
if (!process_ipsec(pkt_dev, skb, protocol))
@@ -2980,7 +2999,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
struct udphdr *udph;
int datalen;
struct ipv6hdr *iph;
- struct pktgen_hdr *pgh = NULL;
__be16 protocol = htons(ETH_P_IPV6);
__be32 *mpls;
__be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */
@@ -3083,75 +3101,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
- if (pkt_dev->nfrags <= 0)
- pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
- else {
- int frags = pkt_dev->nfrags;
- int i;
-
- pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
- if (frags > MAX_SKB_FRAGS)
- frags = MAX_SKB_FRAGS;
- if (datalen > frags * PAGE_SIZE) {
- skb_put(skb, datalen - frags * PAGE_SIZE);
- datalen = frags * PAGE_SIZE;
- }
-
- i = 0;
- while (datalen > 0) {
- struct page *page = alloc_pages(GFP_KERNEL, 0);
- skb_shinfo(skb)->frags[i].page = page;
- skb_shinfo(skb)->frags[i].page_offset = 0;
- skb_shinfo(skb)->frags[i].size =
- (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
- datalen -= skb_shinfo(skb)->frags[i].size;
- skb->len += skb_shinfo(skb)->frags[i].size;
- skb->data_len += skb_shinfo(skb)->frags[i].size;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
-
- while (i < frags) {
- int rem;
-
- if (i == 0)
- break;
-
- rem = skb_shinfo(skb)->frags[i - 1].size / 2;
- if (rem == 0)
- break;
-
- skb_shinfo(skb)->frags[i - 1].size -= rem;
-
- skb_shinfo(skb)->frags[i] =
- skb_shinfo(skb)->frags[i - 1];
- get_page(skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->frags[i].page =
- skb_shinfo(skb)->frags[i - 1].page;
- skb_shinfo(skb)->frags[i].page_offset +=
- skb_shinfo(skb)->frags[i - 1].size;
- skb_shinfo(skb)->frags[i].size = rem;
- i++;
- skb_shinfo(skb)->nr_frags = i;
- }
- }
-
- /* Stamp the time, and sequence number,
- * convert them to network byte order
- * should we update cloned packets too ?
- */
- if (pgh) {
- struct timeval timestamp;
-
- pgh->pgh_magic = htonl(PKTGEN_MAGIC);
- pgh->seq_num = htonl(pkt_dev->seq_num);
-
- do_gettimeofday(&timestamp);
- pgh->tv_sec = htonl(timestamp.tv_sec);
- pgh->tv_usec = htonl(timestamp.tv_usec);
- }
- /* pkt_dev->seq_num++; FF: you really mean this? */
+ pktgen_finalize_skb(pkt_dev, skb, datalen);
return skb;
}
@@ -3884,6 +3834,8 @@ static int pktgen_remove_device(struct pktgen_thread *t,
free_SAs(pkt_dev);
#endif
vfree(pkt_dev->flows);
+ if (pkt_dev->page)
+ put_page(pkt_dev->page);
kfree(pkt_dev);
return 0;
}
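
pktgen now keeps one cached page per device (pkt_dev->page), takes an extra reference per fragment instead of allocating fresh pages, and drops the cache whenever the NUMA node setting changes. A hedged sketch of driving the relevant knobs through procfs; it assumes the pktgen module is loaded, eth0 is already attached to a pktgen thread, and the NODE_ALLOC flag name matches the module's flag table:

/* Sketch: exercise the cached-page path with fragmented skbs. */
#include <stdio.h>

static int pgset(const char *path, const char *cmd)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", cmd);
	return fclose(f);
}

int main(void)
{
	const char *dev = "/proc/net/pktgen/eth0";

	pgset(dev, "frags 4");		/* take the pkt_dev->page branch */
	pgset(dev, "flag NODE_ALLOC");	/* honor the node setting (F_NODE) */
	pgset(dev, "node 0");		/* releases the cached page, reallocates on node 0 */
	pgset(dev, "count 1000");
	return 0;
}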
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2d65c6b..49f7ea5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -868,6 +868,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
+ NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
if (dev->ifindex != dev->iflink)
NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
@@ -1035,6 +1036,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
[IFLA_MTU] = { .type = NLA_U32 },
[IFLA_LINK] = { .type = NLA_U32 },
+ [IFLA_MASTER] = { .type = NLA_U32 },
[IFLA_TXQLEN] = { .type = NLA_U32 },
[IFLA_WEIGHT] = { .type = NLA_U32 },
[IFLA_OPERSTATE] = { .type = NLA_U8 },
@@ -1177,6 +1179,41 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
return err;
}
+static int do_set_master(struct net_device *dev, int ifindex)
+{
+ struct net_device *master_dev;
+ const struct net_device_ops *ops;
+ int err;
+
+ if (dev->master) {
+ if (dev->master->ifindex == ifindex)
+ return 0;
+ ops = dev->master->netdev_ops;
+ if (ops->ndo_del_slave) {
+ err = ops->ndo_del_slave(dev->master, dev);
+ if (err)
+ return err;
+ } else {
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (ifindex) {
+ master_dev = __dev_get_by_index(dev_net(dev), ifindex);
+ if (!master_dev)
+ return -EINVAL;
+ ops = master_dev->netdev_ops;
+ if (ops->ndo_add_slave) {
+ err = ops->ndo_add_slave(master_dev, dev);
+ if (err)
+ return err;
+ } else {
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
@@ -1264,6 +1301,11 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
modified = 1;
}
+ if (tb[IFLA_GROUP]) {
+ dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+ modified = 1;
+ }
+
/*
* Interface selected by interface index but interface
* name provided implies that a name change has been
@@ -1295,6 +1337,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
goto errout;
}
+ if (tb[IFLA_MASTER]) {
+ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
+ if (err)
+ goto errout;
+ modified = 1;
+ }
+
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
@@ -1541,6 +1590,8 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+ if (tb[IFLA_GROUP])
+ dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
return dev;
@@ -1551,6 +1602,24 @@ err:
}
EXPORT_SYMBOL(rtnl_create_link);
+static int rtnl_group_changelink(struct net *net, int group,
+ struct ifinfomsg *ifm,
+ struct nlattr **tb)
+{
+ struct net_device *dev;
+ int err;
+
+ for_each_netdev(net, dev) {
+ if (dev->group == group) {
+ err = do_setlink(dev, ifm, tb, NULL, 0);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
@@ -1578,10 +1647,12 @@ replay:
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
- else if (ifname[0])
- dev = __dev_get_by_name(net, ifname);
- else
- dev = NULL;
+ else {
+ if (ifname[0])
+ dev = __dev_get_by_name(net, ifname);
+ else
+ dev = NULL;
+ }
err = validate_linkmsg(dev, tb);
if (err < 0)
@@ -1645,8 +1716,13 @@ replay:
return do_setlink(dev, ifm, tb, ifname, modified);
}
- if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+ return rtnl_group_changelink(net,
+ nla_get_u32(tb[IFLA_GROUP]),
+ ifm, tb);
return -ENODEV;
+ }
if (ifm->ifi_index)
return -EOPNOTSUPP;
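
Taken together, the rtnetlink changes let one RTM_NEWLINK act on a whole device group: with ifi_index == 0, no NLM_F_CREATE, and an IFLA_GROUP attribute present, rtnl_group_changelink() replays do_setlink() on every member. A raw-netlink sketch that sets the MTU of all group-5 devices in a single message (the group number and MTU are arbitrary examples):

/* Sketch: one RTM_NEWLINK that rtnl_group_changelink() fans out to
 * every device in group 5. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

struct req {
	struct nlmsghdr nh;
	struct ifinfomsg ifi;
	char attrs[64];
};

static void add_u32(struct req *r, int type, __u32 val)
{
	struct rtattr *rta = (struct rtattr *)((char *)r + NLMSG_ALIGN(r->nh.nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(sizeof(val));
	memcpy(RTA_DATA(rta), &val, sizeof(val));
	r->nh.nlmsg_len = NLMSG_ALIGN(r->nh.nlmsg_len) + RTA_LENGTH(sizeof(val));
}

int main(void)
{
	struct req r = {
		.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
		.nh.nlmsg_type = RTM_NEWLINK,
		/* no NLM_F_CREATE and ifi_index == 0: group-wide setlink */
		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
		.ifi.ifi_family = AF_UNSPEC,
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;
	add_u32(&r, IFLA_GROUP, 5);
	add_u32(&r, IFLA_MTU, 1280);
	send(fd, &r, r.nh.nlmsg_len, 0);	/* unconnected netlink goes to the kernel */
	close(fd);
	return 0;
}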
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d883dcc..14cf560 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2498,7 +2498,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
* a pointer to the first in a list of new skbs for the segments.
* In case of error it returns ERR_PTR(err).
*/
-struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
@@ -2508,7 +2508,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
unsigned int offset = doffset;
unsigned int headroom;
unsigned int len;
- int sg = features & NETIF_F_SG;
+ int sg = !!(features & NETIF_F_SG);
int nfrags = skb_shinfo(skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
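
With features widened to u32, the !! normalization matters: a raw "features & flag" keeps the bit in place, and stuffing a high bit into an int yields an implementation-defined (typically negative) value rather than a clean boolean. A toy demonstration with a hypothetical high feature bit:

/* Demo: why the flag test is normalized with "!!" once features is u32. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_FLAG (1u << 31)	/* hypothetical high feature bit */

int main(void)
{
	uint32_t features = DEMO_FLAG;
	int raw = features & DEMO_FLAG;		/* implementation-defined, typically negative */
	int norm = !!(features & DEMO_FLAG);	/* always exactly 0 or 1 */

	printf("raw=%d norm=%d\n", raw, norm);
	return 0;
}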
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index e96d5e8..fadecd2 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -583,6 +583,15 @@ done:
dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
+/*
+ * Convert RFC 3390 larger initial window into an equivalent number of packets.
+ * This is based on the numbers specified in RFC 5681, 3.1.
+ */
+static inline u32 rfc3390_bytes_to_packets(const u32 smss)
+{
+ return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
+}
+
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
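
The helper transcribes the initial-window table of RFC 5681, 3.1: four segments for SMSS <= 1095 bytes, two above 2190, otherwise three. A self-contained check with representative MSS values:

/* Quick check of the RFC 3390/5681 initial-window helper added above. */
#include <assert.h>
#include <stdint.h>

static inline uint32_t rfc3390_bytes_to_packets(const uint32_t smss)
{
	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
}

int main(void)
{
	assert(rfc3390_bytes_to_packets(536) == 4);	/* small MSS: IW = 4 */
	assert(rfc3390_bytes_to_packets(1460) == 3);	/* Ethernet MSS: IW = 3 */
	assert(rfc3390_bytes_to_packets(4380) == 2);	/* jumbo MSS: IW = 2 */
	return 0;
}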
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 5e63636..06c054d 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -112,6 +112,7 @@ static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static void dn_dst_destroy(struct dst_entry *);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
@@ -133,11 +134,18 @@ static struct dst_ops dn_dst_ops = {
.check = dn_dst_check,
.default_advmss = dn_dst_default_advmss,
.default_mtu = dn_dst_default_mtu,
+ .cow_metrics = dst_cow_metrics_generic,
+ .destroy = dn_dst_destroy,
.negative_advice = dn_dst_negative_advice,
.link_failure = dn_dst_link_failure,
.update_pmtu = dn_dst_update_pmtu,
};
+static void dn_dst_destroy(struct dst_entry *dst)
+{
+ dst_destroy_metrics_generic(dst);
+}
+
static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
{
__u16 tmp = (__u16 __force)(src ^ dst);
@@ -814,14 +822,14 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
struct dn_fib_info *fi = res->fi;
struct net_device *dev = rt->dst.dev;
+ unsigned int mss_metric;
struct neighbour *n;
- unsigned int metric;
if (fi) {
if (DN_FIB_RES_GW(*res) &&
DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = DN_FIB_RES_GW(*res);
- dst_import_metrics(&rt->dst, fi->fib_metrics);
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
}
rt->rt_type = res->type;
@@ -834,10 +842,10 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
- metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
- if (metric) {
+ mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
+ if (mss_metric) {
unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
- if (metric > mss)
+ if (mss_metric > mss)
dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
}
return 0;
@@ -1114,7 +1122,7 @@ make_route:
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- rt = dst_alloc(&dn_dst_ops);
+ rt = dst_alloc(&dn_dst_ops, 0);
if (rt == NULL)
goto e_nobufs;
@@ -1375,7 +1383,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
}
make_route:
- rt = dst_alloc(&dn_dst_ops);
+ rt = dst_alloc(&dn_dst_ops, 0);
if (rt == NULL)
goto e_nobufs;
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index f2abd37..b66600b 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -59,7 +59,6 @@ struct dn_hash
};
#define dz_key_0(key) ((key).datum = 0)
-#define dz_prefix(key,dz) ((key).datum)
#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index a5a1050..cbb505b 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -55,45 +55,9 @@ config IP_ADVANCED_ROUTER
If unsure, say N here.
-choice
- prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
- depends on IP_ADVANCED_ROUTER
- default ASK_IP_FIB_HASH
-
-config ASK_IP_FIB_HASH
- bool "FIB_HASH"
- ---help---
- Current FIB is very proven and good enough for most users.
-
-config IP_FIB_TRIE
- bool "FIB_TRIE"
- ---help---
- Use new experimental LC-trie as FIB lookup algorithm.
- This improves lookup performance if you have a large
- number of routes.
-
- LC-trie is a longest matching prefix lookup algorithm which
- performs better than FIB_HASH for large routing tables.
- But, it consumes more memory and is more complex.
-
- LC-trie is described in:
-
- IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
- IEEE Journal on Selected Areas in Communications, 17(6):1083-1092,
- June 1999
-
- An experimental study of compression methods for dynamic tries
- Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
- <http://www.csc.kth.se/~snilsson/software/dyntrie2/>
-
-endchoice
-
-config IP_FIB_HASH
- def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER
-
config IP_FIB_TRIE_STATS
bool "FIB TRIE statistics"
- depends on IP_FIB_TRIE
+ depends on IP_ADVANCED_ROUTER
---help---
Keep track of statistics on structure of FIB TRIE table.
Useful for testing and measuring TRIE performance.
@@ -140,6 +104,9 @@ config IP_ROUTE_VERBOSE
handled by the klogd daemon which is responsible for kernel messages
("man klogd").
+config IP_ROUTE_CLASSID
+ bool
+
config IP_PNP
bool "IP: kernel level autoconfiguration"
help
@@ -657,4 +624,3 @@ config TCP_MD5SIG
on the Internet.
If unsure, say N.
-
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 4978d22..0dc772d 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -10,12 +10,10 @@ obj-y := route.o inetpeer.o protocol.o \
tcp_minisocks.o tcp_cong.o \
datagram.o raw.o udp.o udplite.o \
arp.o icmp.o devinet.o af_inet.o igmp.o \
- fib_frontend.o fib_semantics.o \
+ fib_frontend.o fib_semantics.o fib_trie.o \
inet_fragment.o
obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
-obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
-obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 45b89d7..7ceb804 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1231,7 +1231,7 @@ out:
return err;
}
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct iphdr *iph;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 86961be..325053d 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -201,7 +201,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->ttl = 0;
top_iph->check = 0;
- ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+ if (x->props.flags & XFRM_STATE_ALIGN4)
+ ah->hdrlen = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+ else
+ ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
ah->reserved = 0;
ah->spi = x->id.spi;
@@ -299,9 +302,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
nexthdr = ah->nexthdr;
ah_hlen = (ah->hdrlen + 2) << 2;
- if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
- ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
- goto out;
+ if (x->props.flags & XFRM_STATE_ALIGN4) {
+ if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
+ ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
+ goto out;
+ } else {
+ if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
+ ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
+ goto out;
+ }
if (!pskb_may_pull(skb, ah_hlen))
goto out;
@@ -450,8 +459,12 @@ static int ah_init_state(struct xfrm_state *x)
BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
- x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
- ahp->icv_trunc_len);
+ if (x->props.flags & XFRM_STATE_ALIGN4)
+ x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
+ ahp->icv_trunc_len);
+ else
+ x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
+ ahp->icv_trunc_len);
if (x->props.mode == XFRM_MODE_TUNNEL)
x->props.header_len += sizeof(struct iphdr);
x->data = ahp;
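
XFRM_STATE_ALIGN4 pads the AH header to 32-bit instead of 64-bit boundaries, which is what RFC 4302 actually specifies for IPv4; the two only diverge when the 12-byte fixed header plus the truncated ICV is not already a multiple of 8. A small demo of the hdrlen arithmetic used above:

/* Demo of the AH length math: ALIGN4 vs ALIGN8 only differ when
 * 12 + icv_trunc_len is not a multiple of 8 (e.g. HMAC-SHA-256-128). */
#include <stdio.h>

#define ALIGN_TO(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define AH_FIXED	12	/* nexthdr, hdrlen, reserved, spi, seq_no */

static int ah_hdrlen(int icv_trunc_len, int align)
{
	/* hdrlen field: length in 32-bit words minus 2, as in ah_output() */
	return (ALIGN_TO(AH_FIXED + icv_trunc_len, align) >> 2) - 2;
}

int main(void)
{
	printf("SHA1-96:    align8=%d align4=%d\n", ah_hdrlen(12, 8), ah_hdrlen(12, 4));
	printf("SHA256-128: align8=%d align4=%d\n", ah_hdrlen(16, 8), ah_hdrlen(16, 4));
	return 0;	/* SHA256-128 shrinks from 6 to 5 words with ALIGN4 */
}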
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index df4616f..9038928 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -51,6 +51,7 @@
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
+#include <linux/hash.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
@@ -92,6 +93,71 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
[IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
};
+/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
+ * value. So if you change this define, make appropriate changes to
+ * inet_addr_hash as well.
+ */
+#define IN4_ADDR_HSIZE 256
+static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
+static DEFINE_SPINLOCK(inet_addr_hash_lock);
+
+static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+{
+ u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+
+ return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
+ (IN4_ADDR_HSIZE - 1));
+}
+
+static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
+{
+ unsigned int hash = inet_addr_hash(net, ifa->ifa_address);
+
+ spin_lock(&inet_addr_hash_lock);
+ hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
+ spin_unlock(&inet_addr_hash_lock);
+}
+
+static void inet_hash_remove(struct in_ifaddr *ifa)
+{
+ spin_lock(&inet_addr_hash_lock);
+ hlist_del_init_rcu(&ifa->hash);
+ spin_unlock(&inet_addr_hash_lock);
+}
+
+/**
+ * __ip_dev_find - find the first device with a given source address.
+ * @net: the net namespace
+ * @addr: the source address
+ * @devref: if true, take a reference on the found device
+ *
+ * If a caller uses devref=false, it should be protected by RCU, or RTNL
+ */
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
+{
+ unsigned int hash = inet_addr_hash(net, addr);
+ struct net_device *result = NULL;
+ struct in_ifaddr *ifa;
+ struct hlist_node *node;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
+ struct net_device *dev = ifa->ifa_dev->dev;
+
+ if (!net_eq(dev_net(dev), net))
+ continue;
+ if (ifa->ifa_address == addr) {
+ result = dev;
+ break;
+ }
+ }
+ if (result && devref)
+ dev_hold(result);
+ rcu_read_unlock();
+ return result;
+}
+EXPORT_SYMBOL(__ip_dev_find);
+
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
@@ -265,6 +331,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
}
if (!do_promote) {
+ inet_hash_remove(ifa);
*ifap1 = ifa->ifa_next;
rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid);
@@ -281,6 +348,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
/* 2. Unlink it */
*ifap = ifa1->ifa_next;
+ inet_hash_remove(ifa1);
/* 3. Announce address deletion */
@@ -368,6 +436,8 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
ifa->ifa_next = *ifap;
*ifap = ifa;
+ inet_hash_insert(dev_net(in_dev->dev), ifa);
+
/* Send message first, then call notifier.
Notifier will trigger FIB update, so that
listeners of netlink will know about new ifaddr */
@@ -521,6 +591,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh)
if (tb[IFA_ADDRESS] == NULL)
tb[IFA_ADDRESS] = tb[IFA_LOCAL];
+ INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_prefixlen = ifm->ifa_prefixlen;
ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
ifa->ifa_flags = ifm->ifa_flags;
@@ -728,6 +799,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
if (!ifa) {
ret = -ENOBUFS;
ifa = inet_alloc_ifa();
+ INIT_HLIST_NODE(&ifa->hash);
if (!ifa)
break;
if (colon)
@@ -1084,6 +1156,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
struct in_ifaddr *ifa = inet_alloc_ifa();
if (ifa) {
+ INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_local =
ifa->ifa_address = htonl(INADDR_LOOPBACK);
ifa->ifa_prefixlen = 8;
@@ -1720,6 +1793,11 @@ static struct rtnl_af_ops inet_af_ops = {
void __init devinet_init(void)
{
+ int i;
+
+ for (i = 0; i < IN4_ADDR_HSIZE; i++)
+ INIT_HLIST_HEAD(&inet_addr_lst[i]);
+
register_pernet_subsys(&devinet_ops);
register_gifconf(PF_INET, inet_gifconf);
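
__ip_dev_find() now resolves source addresses through a dedicated 256-bucket hash instead of a FIB lookup; the hash simply folds all four address bytes, salted per namespace via hash_ptr(net, 8). A userspace sketch of the fold, with a fixed salt standing in for the namespace hash:

/* Sketch of the inet_addr_hash() fold above; the salt 0xab is a
 * stand-in for hash_ptr(net, 8). */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IN4_ADDR_HSIZE 256

static unsigned int inet_addr_hash(uint32_t salt, uint32_t addr_be)
{
	uint32_t val = addr_be ^ salt;

	return (val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
	       (IN4_ADDR_HSIZE - 1);
}

int main(void)
{
	uint32_t addr = inet_addr("192.168.1.17");	/* network byte order */

	printf("bucket %u of %u\n", inet_addr_hash(0xabu, addr), IN4_ADDR_HSIZE);
	return 0;
}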
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 1d2cdd4..ad0778a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -51,11 +51,11 @@ static int __net_init fib4_rules_init(struct net *net)
{
struct fib_table *local_table, *main_table;
- local_table = fib_hash_table(RT_TABLE_LOCAL);
+ local_table = fib_trie_table(RT_TABLE_LOCAL);
if (local_table == NULL)
return -ENOMEM;
- main_table = fib_hash_table(RT_TABLE_MAIN);
+ main_table = fib_trie_table(RT_TABLE_MAIN);
if (main_table == NULL)
goto fail;
@@ -82,7 +82,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
if (tb)
return tb;
- tb = fib_hash_table(id);
+ tb = fib_trie_table(id);
if (!tb)
return NULL;
h = id & (FIB_TABLE_HASHSZ - 1);
@@ -114,21 +114,6 @@ struct fib_table *fib_get_table(struct net *net, u32 id)
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
-void fib_select_default(struct net *net,
- const struct flowi *flp, struct fib_result *res)
-{
- struct fib_table *tb;
- int table = RT_TABLE_MAIN;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- if (res->r == NULL || res->r->action != FR_ACT_TO_TBL)
- return;
- table = res->r->table;
-#endif
- tb = fib_get_table(net, table);
- if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
- fib_table_select_default(tb, flp, res);
-}
-
static void fib_flush(struct net *net)
{
int flushed = 0;
@@ -147,46 +132,6 @@ static void fib_flush(struct net *net)
rt_cache_flush(net, -1);
}
-/**
- * __ip_dev_find - find the first device with a given source address.
- * @net: the net namespace
- * @addr: the source address
- * @devref: if true, take a reference on the found device
- *
- * If a caller uses devref=false, it should be protected by RCU, or RTNL
- */
-struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
-{
- struct flowi fl = {
- .fl4_dst = addr,
- };
- struct fib_result res = { 0 };
- struct net_device *dev = NULL;
- struct fib_table *local_table;
-
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
-
- rcu_read_lock();
- local_table = fib_get_table(net, RT_TABLE_LOCAL);
- if (!local_table ||
- fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
- rcu_read_unlock();
- return NULL;
- }
- if (res.type != RTN_LOCAL)
- goto out;
- dev = FIB_RES_DEV(res);
-
- if (dev && devref)
- dev_hold(dev);
-out:
- rcu_read_unlock();
- return dev;
-}
-EXPORT_SYMBOL(__ip_dev_find);
-
/*
* Find address type as if only "dev" was present in the system. If
* on_dev is NULL then all interfaces are taken into consideration.
@@ -1101,5 +1046,5 @@ void __init ip_fib_init(void)
register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);
- fib_hash_init();
+ fib_trie_init();
}
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
deleted file mode 100644
index b3acb04..0000000
--- a/net/ipv4/fib_hash.c
+++ /dev/null
@@ -1,1133 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * IPv4 FIB: lookup engine and maintenance routines.
- *
- * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/inetdevice.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <net/net_namespace.h>
-#include <net/ip.h>
-#include <net/protocol.h>
-#include <net/route.h>
-#include <net/tcp.h>
-#include <net/sock.h>
-#include <net/ip_fib.h>
-
-#include "fib_lookup.h"
-
-static struct kmem_cache *fn_hash_kmem __read_mostly;
-static struct kmem_cache *fn_alias_kmem __read_mostly;
-
-struct fib_node {
- struct hlist_node fn_hash;
- struct list_head fn_alias;
- __be32 fn_key;
- struct fib_alias fn_embedded_alias;
-};
-
-#define EMBEDDED_HASH_SIZE (L1_CACHE_BYTES / sizeof(struct hlist_head))
-
-struct fn_zone {
- struct fn_zone __rcu *fz_next; /* Next not empty zone */
- struct hlist_head __rcu *fz_hash; /* Hash table pointer */
- seqlock_t fz_lock;
- u32 fz_hashmask; /* (fz_divisor - 1) */
-
- u8 fz_order; /* Zone order (0..32) */
- u8 fz_revorder; /* 32 - fz_order */
- __be32 fz_mask; /* inet_make_mask(order) */
-#define FZ_MASK(fz) ((fz)->fz_mask)
-
- struct hlist_head fz_embedded_hash[EMBEDDED_HASH_SIZE];
-
- int fz_nent; /* Number of entries */
- int fz_divisor; /* Hash size (mask+1) */
-};
-
-struct fn_hash {
- struct fn_zone *fn_zones[33];
- struct fn_zone __rcu *fn_zone_list;
-};
-
-static inline u32 fn_hash(__be32 key, struct fn_zone *fz)
-{
- u32 h = ntohl(key) >> fz->fz_revorder;
- h ^= (h>>20);
- h ^= (h>>10);
- h ^= (h>>5);
- h &= fz->fz_hashmask;
- return h;
-}
-
-static inline __be32 fz_key(__be32 dst, struct fn_zone *fz)
-{
- return dst & FZ_MASK(fz);
-}
-
-static unsigned int fib_hash_genid;
-
-#define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))
-
-static struct hlist_head *fz_hash_alloc(int divisor)
-{
- unsigned long size = divisor * sizeof(struct hlist_head);
-
- if (size <= PAGE_SIZE)
- return kzalloc(size, GFP_KERNEL);
-
- return (struct hlist_head *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
-}
-
-/* The fib hash lock must be held when this is called. */
-static inline void fn_rebuild_zone(struct fn_zone *fz,
- struct hlist_head *old_ht,
- int old_divisor)
-{
- int i;
-
- for (i = 0; i < old_divisor; i++) {
- struct hlist_node *node, *n;
- struct fib_node *f;
-
- hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
- struct hlist_head *new_head;
-
- hlist_del_rcu(&f->fn_hash);
-
- new_head = rcu_dereference_protected(fz->fz_hash, 1) +
- fn_hash(f->fn_key, fz);
- hlist_add_head_rcu(&f->fn_hash, new_head);
- }
- }
-}
-
-static void fz_hash_free(struct hlist_head *hash, int divisor)
-{
- unsigned long size = divisor * sizeof(struct hlist_head);
-
- if (size <= PAGE_SIZE)
- kfree(hash);
- else
- free_pages((unsigned long)hash, get_order(size));
-}
-
-static void fn_rehash_zone(struct fn_zone *fz)
-{
- struct hlist_head *ht, *old_ht;
- int old_divisor, new_divisor;
- u32 new_hashmask;
-
- new_divisor = old_divisor = fz->fz_divisor;
-
- switch (old_divisor) {
- case EMBEDDED_HASH_SIZE:
- new_divisor *= EMBEDDED_HASH_SIZE;
- break;
- case EMBEDDED_HASH_SIZE*EMBEDDED_HASH_SIZE:
- new_divisor *= (EMBEDDED_HASH_SIZE/2);
- break;
- default:
- if ((old_divisor << 1) > FZ_MAX_DIVISOR) {
- printk(KERN_CRIT "route.c: bad divisor %d!\n", old_divisor);
- return;
- }
- new_divisor = (old_divisor << 1);
- break;
- }
-
- new_hashmask = (new_divisor - 1);
-
-#if RT_CACHE_DEBUG >= 2
- printk(KERN_DEBUG "fn_rehash_zone: hash for zone %d grows from %d\n",
- fz->fz_order, old_divisor);
-#endif
-
- ht = fz_hash_alloc(new_divisor);
-
- if (ht) {
- struct fn_zone nfz;
-
- memcpy(&nfz, fz, sizeof(nfz));
-
- write_seqlock_bh(&fz->fz_lock);
- old_ht = rcu_dereference_protected(fz->fz_hash, 1);
- RCU_INIT_POINTER(nfz.fz_hash, ht);
- nfz.fz_hashmask = new_hashmask;
- nfz.fz_divisor = new_divisor;
- fn_rebuild_zone(&nfz, old_ht, old_divisor);
- fib_hash_genid++;
- rcu_assign_pointer(fz->fz_hash, ht);
- fz->fz_hashmask = new_hashmask;
- fz->fz_divisor = new_divisor;
- write_sequnlock_bh(&fz->fz_lock);
-
- if (old_ht != fz->fz_embedded_hash) {
- synchronize_rcu();
- fz_hash_free(old_ht, old_divisor);
- }
- }
-}
-
-static void fn_free_node_rcu(struct rcu_head *head)
-{
- struct fib_node *f = container_of(head, struct fib_node, fn_embedded_alias.rcu);
-
- kmem_cache_free(fn_hash_kmem, f);
-}
-
-static inline void fn_free_node(struct fib_node *f)
-{
- call_rcu(&f->fn_embedded_alias.rcu, fn_free_node_rcu);
-}
-
-static void fn_free_alias_rcu(struct rcu_head *head)
-{
- struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
-
- kmem_cache_free(fn_alias_kmem, fa);
-}
-
-static inline void fn_free_alias(struct fib_alias *fa, struct fib_node *f)
-{
- fib_release_info(fa->fa_info);
- if (fa == &f->fn_embedded_alias)
- fa->fa_info = NULL;
- else
- call_rcu(&fa->rcu, fn_free_alias_rcu);
-}
-
-static struct fn_zone *
-fn_new_zone(struct fn_hash *table, int z)
-{
- int i;
- struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
- if (!fz)
- return NULL;
-
- seqlock_init(&fz->fz_lock);
- fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
- fz->fz_hashmask = fz->fz_divisor - 1;
- RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
- fz->fz_order = z;
- fz->fz_revorder = 32 - z;
- fz->fz_mask = inet_make_mask(z);
-
- /* Find the first not empty zone with more specific mask */
- for (i = z + 1; i <= 32; i++)
- if (table->fn_zones[i])
- break;
- if (i > 32) {
- /* No more specific masks, we are the first. */
- rcu_assign_pointer(fz->fz_next,
- rtnl_dereference(table->fn_zone_list));
- rcu_assign_pointer(table->fn_zone_list, fz);
- } else {
- rcu_assign_pointer(fz->fz_next,
- rtnl_dereference(table->fn_zones[i]->fz_next));
- rcu_assign_pointer(table->fn_zones[i]->fz_next, fz);
- }
- table->fn_zones[z] = fz;
- fib_hash_genid++;
- return fz;
-}
-
-int fib_table_lookup(struct fib_table *tb,
- const struct flowi *flp, struct fib_result *res,
- int fib_flags)
-{
- int err;
- struct fn_zone *fz;
- struct fn_hash *t = (struct fn_hash *)tb->tb_data;
-
- rcu_read_lock();
- for (fz = rcu_dereference(t->fn_zone_list);
- fz != NULL;
- fz = rcu_dereference(fz->fz_next)) {
- struct hlist_head *head;
- struct hlist_node *node;
- struct fib_node *f;
- __be32 k;
- unsigned int seq;
-
- do {
- seq = read_seqbegin(&fz->fz_lock);
- k = fz_key(flp->fl4_dst, fz);
-
- head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- if (f->fn_key != k)
- continue;
-
- err = fib_semantic_match(&f->fn_alias,
- flp, res,
- fz->fz_order, fib_flags);
- if (err <= 0)
- goto out;
- }
- } while (read_seqretry(&fz->fz_lock, seq));
- }
- err = 1;
-out:
- rcu_read_unlock();
- return err;
-}
-
-void fib_table_select_default(struct fib_table *tb,
- const struct flowi *flp, struct fib_result *res)
-{
- int order, last_idx;
- struct hlist_node *node;
- struct fib_node *f;
- struct fib_info *fi = NULL;
- struct fib_info *last_resort;
- struct fn_hash *t = (struct fn_hash *)tb->tb_data;
- struct fn_zone *fz = t->fn_zones[0];
- struct hlist_head *head;
-
- if (fz == NULL)
- return;
-
- last_idx = -1;
- last_resort = NULL;
- order = -1;
-
- rcu_read_lock();
- head = rcu_dereference(fz->fz_hash);
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- struct fib_alias *fa;
-
- list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
- struct fib_info *next_fi = fa->fa_info;
-
- if (fa->fa_scope != res->scope ||
- fa->fa_type != RTN_UNICAST)
- continue;
-
- if (next_fi->fib_priority > res->fi->fib_priority)
- break;
- if (!next_fi->fib_nh[0].nh_gw ||
- next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
- continue;
-
- fib_alias_accessed(fa);
-
- if (fi == NULL) {
- if (next_fi != res->fi)
- break;
- } else if (!fib_detect_death(fi, order, &last_resort,
- &last_idx, tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
- fi = next_fi;
- order++;
- }
- }
-
- if (order <= 0 || fi == NULL) {
- tb->tb_default = -1;
- goto out;
- }
-
- if (!fib_detect_death(fi, order, &last_resort, &last_idx,
- tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
-
- if (last_idx >= 0)
- fib_result_assign(res, last_resort);
- tb->tb_default = last_idx;
-out:
- rcu_read_unlock();
-}
-
-/* Insert node F to FZ. */
-static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
-{
- struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
-
- hlist_add_head_rcu(&f->fn_hash, head);
-}
-
-/* Return the node in FZ matching KEY. */
-static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
-{
- struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
- struct hlist_node *node;
- struct fib_node *f;
-
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- if (f->fn_key == key)
- return f;
- }
-
- return NULL;
-}
-
-
-static struct fib_alias *fib_fast_alloc(struct fib_node *f)
-{
- struct fib_alias *fa = &f->fn_embedded_alias;
-
- if (fa->fa_info != NULL)
- fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
- return fa;
-}
-
-/* Caller must hold RTNL. */
-int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
-{
- struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fib_node *new_f = NULL;
- struct fib_node *f;
- struct fib_alias *fa, *new_fa;
- struct fn_zone *fz;
- struct fib_info *fi;
- u8 tos = cfg->fc_tos;
- __be32 key;
- int err;
-
- if (cfg->fc_dst_len > 32)
- return -EINVAL;
-
- fz = table->fn_zones[cfg->fc_dst_len];
- if (!fz && !(fz = fn_new_zone(table, cfg->fc_dst_len)))
- return -ENOBUFS;
-
- key = 0;
- if (cfg->fc_dst) {
- if (cfg->fc_dst & ~FZ_MASK(fz))
- return -EINVAL;
- key = fz_key(cfg->fc_dst, fz);
- }
-
- fi = fib_create_info(cfg);
- if (IS_ERR(fi))
- return PTR_ERR(fi);
-
- if (fz->fz_nent > (fz->fz_divisor<<1) &&
- fz->fz_divisor < FZ_MAX_DIVISOR &&
- (cfg->fc_dst_len == 32 ||
- (1 << cfg->fc_dst_len) > fz->fz_divisor))
- fn_rehash_zone(fz);
-
- f = fib_find_node(fz, key);
-
- if (!f)
- fa = NULL;
- else
- fa = fib_find_alias(&f->fn_alias, tos, fi->fib_priority);
-
- /* Now fa, if non-NULL, points to the first fib alias
- * with the same keys [prefix,tos,priority], if such key already
- * exists or to the node before which we will insert new one.
- *
- * If fa is NULL, we will need to allocate a new one and
- * insert to the head of f.
- *
- * If f is NULL, no fib node matched the destination key
- * and we need to allocate a new one of those as well.
- */
-
- if (fa && fa->fa_tos == tos &&
- fa->fa_info->fib_priority == fi->fib_priority) {
- struct fib_alias *fa_first, *fa_match;
-
- err = -EEXIST;
- if (cfg->fc_nlflags & NLM_F_EXCL)
- goto out;
-
- /* We have 2 goals:
- * 1. Find exact match for type, scope, fib_info to avoid
- * duplicate routes
- * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
- */
- fa_match = NULL;
- fa_first = fa;
- fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
- list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
- if (fa->fa_tos != tos)
- break;
- if (fa->fa_info->fib_priority != fi->fib_priority)
- break;
- if (fa->fa_type == cfg->fc_type &&
- fa->fa_scope == cfg->fc_scope &&
- fa->fa_info == fi) {
- fa_match = fa;
- break;
- }
- }
-
- if (cfg->fc_nlflags & NLM_F_REPLACE) {
- u8 state;
-
- fa = fa_first;
- if (fa_match) {
- if (fa == fa_match)
- err = 0;
- goto out;
- }
- err = -ENOBUFS;
- new_fa = fib_fast_alloc(f);
- if (new_fa == NULL)
- goto out;
-
- new_fa->fa_tos = fa->fa_tos;
- new_fa->fa_info = fi;
- new_fa->fa_type = cfg->fc_type;
- new_fa->fa_scope = cfg->fc_scope;
- state = fa->fa_state;
- new_fa->fa_state = state & ~FA_S_ACCESSED;
- fib_hash_genid++;
- list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
-
- fn_free_alias(fa, f);
- if (state & FA_S_ACCESSED)
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
- rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len,
- tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
- return 0;
- }
-
- /* Error if we find a perfect match which
- * uses the same scope, type, and nexthop
- * information.
- */
- if (fa_match)
- goto out;
-
- if (!(cfg->fc_nlflags & NLM_F_APPEND))
- fa = fa_first;
- }
-
- err = -ENOENT;
- if (!(cfg->fc_nlflags & NLM_F_CREATE))
- goto out;
-
- err = -ENOBUFS;
-
- if (!f) {
- new_f = kmem_cache_zalloc(fn_hash_kmem, GFP_KERNEL);
- if (new_f == NULL)
- goto out;
-
- INIT_HLIST_NODE(&new_f->fn_hash);
- INIT_LIST_HEAD(&new_f->fn_alias);
- new_f->fn_key = key;
- f = new_f;
- }
-
- new_fa = fib_fast_alloc(f);
- if (new_fa == NULL)
- goto out;
-
- new_fa->fa_info = fi;
- new_fa->fa_tos = tos;
- new_fa->fa_type = cfg->fc_type;
- new_fa->fa_scope = cfg->fc_scope;
- new_fa->fa_state = 0;
-
- /*
- * Insert new entry to the list.
- */
-
- if (new_f)
- fib_insert_node(fz, new_f);
- list_add_tail_rcu(&new_fa->fa_list,
- (fa ? &fa->fa_list : &f->fn_alias));
- fib_hash_genid++;
-
- if (new_f)
- fz->fz_nent++;
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-
- rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id,
- &cfg->fc_nlinfo, 0);
- return 0;
-
-out:
- if (new_f)
- kmem_cache_free(fn_hash_kmem, new_f);
- fib_release_info(fi);
- return err;
-}
-
-int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
-{
- struct fn_hash *table = (struct fn_hash *)tb->tb_data;
- struct fib_node *f;
- struct fib_alias *fa, *fa_to_delete;
- struct fn_zone *fz;
- __be32 key;
-
- if (cfg->fc_dst_len > 32)
- return -EINVAL;
-
- if ((fz = table->fn_zones[cfg->fc_dst_len]) == NULL)
- return -ESRCH;
-
- key = 0;
- if (cfg->fc_dst) {
- if (cfg->fc_dst & ~FZ_MASK(fz))
- return -EINVAL;
- key = fz_key(cfg->fc_dst, fz);
- }
-
- f = fib_find_node(fz, key);
-
- if (!f)
- fa = NULL;
- else
- fa = fib_find_alias(&f->fn_alias, cfg->fc_tos, 0);
- if (!fa)
- return -ESRCH;
-
- fa_to_delete = NULL;
- fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
- list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
- struct fib_info *fi = fa->fa_info;
-
- if (fa->fa_tos != cfg->fc_tos)
- break;
-
- if ((!cfg->fc_type ||
- fa->fa_type == cfg->fc_type) &&
- (cfg->fc_scope == RT_SCOPE_NOWHERE ||
- fa->fa_scope == cfg->fc_scope) &&
- (!cfg->fc_protocol ||
- fi->fib_protocol == cfg->fc_protocol) &&
- fib_nh_match(cfg, fi) == 0) {
- fa_to_delete = fa;
- break;
- }
- }
-
- if (fa_to_delete) {
- int kill_fn;
-
- fa = fa_to_delete;
- rtmsg_fib(RTM_DELROUTE, key, fa, cfg->fc_dst_len,
- tb->tb_id, &cfg->fc_nlinfo, 0);
-
- kill_fn = 0;
- list_del_rcu(&fa->fa_list);
- if (list_empty(&f->fn_alias)) {
- hlist_del_rcu(&f->fn_hash);
- kill_fn = 1;
- }
- fib_hash_genid++;
-
- if (fa->fa_state & FA_S_ACCESSED)
- rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
- fn_free_alias(fa, f);
- if (kill_fn) {
- fn_free_node(f);
- fz->fz_nent--;
- }
-
- return 0;
- }
- return -ESRCH;
-}
-
-static int fn_flush_list(struct fn_zone *fz, int idx)
-{
- struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
- struct hlist_node *node, *n;
- struct fib_node *f;
- int found = 0;
-
- hlist_for_each_entry_safe(f, node, n, head, fn_hash) {
- struct fib_alias *fa, *fa_node;
- int kill_f;
-
- kill_f = 0;
- list_for_each_entry_safe(fa, fa_node, &f->fn_alias, fa_list) {
- struct fib_info *fi = fa->fa_info;
-
- if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
- list_del_rcu(&fa->fa_list);
- if (list_empty(&f->fn_alias)) {
- hlist_del_rcu(&f->fn_hash);
- kill_f = 1;
- }
- fib_hash_genid++;
-
- fn_free_alias(fa, f);
- found++;
- }
- }
- if (kill_f) {
- fn_free_node(f);
- fz->fz_nent--;
- }
- }
- return found;
-}
-
-/* caller must hold RTNL. */
-int fib_table_flush(struct fib_table *tb)
-{
- struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fn_zone *fz;
- int found = 0;
-
- for (fz = rtnl_dereference(table->fn_zone_list);
- fz != NULL;
- fz = rtnl_dereference(fz->fz_next)) {
- int i;
-
- for (i = fz->fz_divisor - 1; i >= 0; i--)
- found += fn_flush_list(fz, i);
- }
- return found;
-}
-
-void fib_free_table(struct fib_table *tb)
-{
- struct fn_hash *table = (struct fn_hash *) tb->tb_data;
- struct fn_zone *fz, *next;
-
- next = table->fn_zone_list;
- while (next != NULL) {
- fz = next;
- next = fz->fz_next;
-
- if (fz->fz_hash != fz->fz_embedded_hash)
- fz_hash_free(fz->fz_hash, fz->fz_divisor);
-
- kfree(fz);
- }
-
- kfree(tb);
-}
-
-static inline int
-fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
- struct fib_table *tb,
- struct fn_zone *fz,
- struct hlist_head *head)
-{
- struct hlist_node *node;
- struct fib_node *f;
- int i, s_i;
-
- s_i = cb->args[4];
- i = 0;
- hlist_for_each_entry_rcu(f, node, head, fn_hash) {
- struct fib_alias *fa;
-
- list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
- if (i < s_i)
- goto next;
-
- if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- RTM_NEWROUTE,
- tb->tb_id,
- fa->fa_type,
- fa->fa_scope,
- f->fn_key,
- fz->fz_order,
- fa->fa_tos,
- fa->fa_info,
- NLM_F_MULTI) < 0) {
- cb->args[4] = i;
- return -1;
- }
-next:
- i++;
- }
- }
- cb->args[4] = i;
- return skb->len;
-}
-
-static inline int
-fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
- struct fib_table *tb,
- struct fn_zone *fz)
-{
- int h, s_h;
- struct hlist_head *head = rcu_dereference(fz->fz_hash);
-
- if (head == NULL)
- return skb->len;
- s_h = cb->args[3];
- for (h = s_h; h < fz->fz_divisor; h++) {
- if (hlist_empty(head + h))
- continue;
- if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
- cb->args[3] = h;
- return -1;
- }
- memset(&cb->args[4], 0,
- sizeof(cb->args) - 4*sizeof(cb->args[0]));
- }
- cb->args[3] = h;
- return skb->len;
-}
-
-int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- int m = 0, s_m;
- struct fn_zone *fz;
- struct fn_hash *table = (struct fn_hash *)tb->tb_data;
-
- s_m = cb->args[2];
- rcu_read_lock();
- for (fz = rcu_dereference(table->fn_zone_list);
- fz != NULL;
- fz = rcu_dereference(fz->fz_next), m++) {
- if (m < s_m)
- continue;
- if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
- cb->args[2] = m;
- rcu_read_unlock();
- return -1;
- }
- memset(&cb->args[3], 0,
- sizeof(cb->args) - 3*sizeof(cb->args[0]));
- }
- rcu_read_unlock();
- cb->args[2] = m;
- return skb->len;
-}
-
-void __init fib_hash_init(void)
-{
- fn_hash_kmem = kmem_cache_create("ip_fib_hash", sizeof(struct fib_node),
- 0, SLAB_PANIC, NULL);
-
- fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias),
- 0, SLAB_PANIC, NULL);
-
-}
-
-struct fib_table *fib_hash_table(u32 id)
-{
- struct fib_table *tb;
-
- tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash),
- GFP_KERNEL);
- if (tb == NULL)
- return NULL;
-
- tb->tb_id = id;
- tb->tb_default = -1;
-
- memset(tb->tb_data, 0, sizeof(struct fn_hash));
- return tb;
-}
-
-/* ------------------------------------------------------------------------ */
-#ifdef CONFIG_PROC_FS
-
-struct fib_iter_state {
- struct seq_net_private p;
- struct fn_zone *zone;
- int bucket;
- struct hlist_head *hash_head;
- struct fib_node *fn;
- struct fib_alias *fa;
- loff_t pos;
- unsigned int genid;
- int valid;
-};
-
-static struct fib_alias *fib_get_first(struct seq_file *seq)
-{
- struct fib_iter_state *iter = seq->private;
- struct fib_table *main_table;
- struct fn_hash *table;
-
- main_table = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
- table = (struct fn_hash *)main_table->tb_data;
-
- iter->bucket = 0;
- iter->hash_head = NULL;
- iter->fn = NULL;
- iter->fa = NULL;
- iter->pos = 0;
- iter->genid = fib_hash_genid;
- iter->valid = 1;
-
- for (iter->zone = rcu_dereference(table->fn_zone_list);
- iter->zone != NULL;
- iter->zone = rcu_dereference(iter->zone->fz_next)) {
- int maxslot;
-
- if (!iter->zone->fz_nent)
- continue;
-
- iter->hash_head = rcu_dereference(iter->zone->fz_hash);
- maxslot = iter->zone->fz_divisor;
-
- for (iter->bucket = 0; iter->bucket < maxslot;
- ++iter->bucket, ++iter->hash_head) {
- struct hlist_node *node;
- struct fib_node *fn;
-
- hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
- struct fib_alias *fa;
-
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fn = fn;
- iter->fa = fa;
- goto out;
- }
- }
- }
- }
-out:
- return iter->fa;
-}
-
-static struct fib_alias *fib_get_next(struct seq_file *seq)
-{
- struct fib_iter_state *iter = seq->private;
- struct fib_node *fn;
- struct fib_alias *fa;
-
- /* Advance FA, if any. */
- fn = iter->fn;
- fa = iter->fa;
- if (fa) {
- BUG_ON(!fn);
- list_for_each_entry_continue(fa, &fn->fn_alias, fa_list) {
- iter->fa = fa;
- goto out;
- }
- }
-
- fa = iter->fa = NULL;
-
- /* Advance FN. */
- if (fn) {
- struct hlist_node *node = &fn->fn_hash;
- hlist_for_each_entry_continue(fn, node, fn_hash) {
- iter->fn = fn;
-
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fa = fa;
- goto out;
- }
- }
- }
-
- fn = iter->fn = NULL;
-
- /* Advance hash chain. */
- if (!iter->zone)
- goto out;
-
- for (;;) {
- struct hlist_node *node;
- int maxslot;
-
- maxslot = iter->zone->fz_divisor;
-
- while (++iter->bucket < maxslot) {
- iter->hash_head++;
-
- hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fn = fn;
- iter->fa = fa;
- goto out;
- }
- }
- }
-
- iter->zone = rcu_dereference(iter->zone->fz_next);
-
- if (!iter->zone)
- goto out;
-
- iter->bucket = 0;
- iter->hash_head = rcu_dereference(iter->zone->fz_hash);
-
- hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
- list_for_each_entry(fa, &fn->fn_alias, fa_list) {
- iter->fn = fn;
- iter->fa = fa;
- goto out;
- }
- }
- }
-out:
- iter->pos++;
- return fa;
-}
-
-static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
-{
- struct fib_iter_state *iter = seq->private;
- struct fib_alias *fa;
-
- if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
- fa = iter->fa;
- pos -= iter->pos;
- } else
- fa = fib_get_first(seq);
-
- if (fa)
- while (pos && (fa = fib_get_next(seq)))
- --pos;
- return pos ? NULL : fa;
-}
-
-static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(RCU)
-{
- void *v = NULL;
-
- rcu_read_lock();
- if (fib_get_table(seq_file_net(seq), RT_TABLE_MAIN))
- v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
- return v;
-}
-
-static void *fib_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- ++*pos;
- return v == SEQ_START_TOKEN ? fib_get_first(seq) : fib_get_next(seq);
-}
-
-static void fib_seq_stop(struct seq_file *seq, void *v)
- __releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
-{
- static const unsigned type2flags[RTN_MAX + 1] = {
- [7] = RTF_REJECT,
- [8] = RTF_REJECT,
- };
- unsigned flags = type2flags[type];
-
- if (fi && fi->fib_nh->nh_gw)
- flags |= RTF_GATEWAY;
- if (mask == htonl(0xFFFFFFFF))
- flags |= RTF_HOST;
- flags |= RTF_UP;
- return flags;
-}
-
-/*
- * This outputs /proc/net/route.
- *
- * It always works in backward compatibility mode.
- * The format of the file is not supposed to be changed.
- */
-static int fib_seq_show(struct seq_file *seq, void *v)
-{
- struct fib_iter_state *iter;
- int len;
- __be32 prefix, mask;
- unsigned flags;
- struct fib_node *f;
- struct fib_alias *fa;
- struct fib_info *fi;
-
- if (v == SEQ_START_TOKEN) {
- seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
- "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
- "\tWindow\tIRTT");
- goto out;
- }
-
- iter = seq->private;
- f = iter->fn;
- fa = iter->fa;
- fi = fa->fa_info;
- prefix = f->fn_key;
- mask = FZ_MASK(iter->zone);
- flags = fib_flag_trans(fa->fa_type, mask, fi);
- if (fi)
- seq_printf(seq,
- "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
- fi->fib_dev ? fi->fib_dev->name : "*", prefix,
- fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
- mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
- fi->fib_window,
- fi->fib_rtt >> 3, &len);
- else
- seq_printf(seq,
- "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
- prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0, &len);
-
- seq_printf(seq, "%*s\n", 127 - len, "");
-out:
- return 0;
-}
-
-static const struct seq_operations fib_seq_ops = {
- .start = fib_seq_start,
- .next = fib_seq_next,
- .stop = fib_seq_stop,
- .show = fib_seq_show,
-};
-
-static int fib_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open_net(inode, file, &fib_seq_ops,
- sizeof(struct fib_iter_state));
-}
-
-static const struct file_operations fib_seq_fops = {
- .owner = THIS_MODULE,
- .open = fib_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_net,
-};
-
-int __net_init fib_proc_init(struct net *net)
-{
- if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_seq_fops))
- return -ENOMEM;
- return 0;
-}
-
-void __net_exit fib_proc_exit(struct net *net)
-{
- proc_net_remove(net, "route");
-}
-#endif /* CONFIG_PROC_FS */
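
With fib_hash.c gone, every table goes through fib_trie. For reference, this is the per-zone hash the deleted engine used: shift the prefix bits down by fz_revorder, fold, and mask. A standalone replica for a /24 zone, using an illustrative 16-bucket mask (the real initial size is EMBEDDED_HASH_SIZE, which depends on L1_CACHE_BYTES):

/* Replica of the fn_hash() zone hash from the file deleted above,
 * kept only to document what fib_trie replaces. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t fn_hash(uint32_t key_host, int fz_order, uint32_t fz_hashmask)
{
	uint32_t h = key_host >> (32 - fz_order);	/* fz_revorder shift */

	h ^= h >> 20;
	h ^= h >> 10;
	h ^= h >> 5;
	return h & fz_hashmask;
}

int main(void)
{
	uint32_t key = ntohl(inet_addr("10.1.2.0"));

	printf("bucket %u\n", fn_hash(key, 24, 15));	/* 16-bucket zone */
	return 0;
}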
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c079cc0..d5c40d8 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -25,7 +25,7 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
}
/* Exported by fib_semantics.c */
-extern int fib_semantic_match(struct list_head *head,
+extern int fib_semantic_match(struct fib_table *tb, struct list_head *head,
const struct flowi *flp,
struct fib_result *res, int prefixlen, int fib_flags);
extern void fib_release_info(struct fib_info *);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 7981a24..3018efb 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -41,13 +41,13 @@ struct fib4_rule {
__be32 srcmask;
__be32 dst;
__be32 dstmask;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
u32 tclassid;
#endif
};
-#ifdef CONFIG_NET_CLS_ROUTE
-u32 fib_rules_tclass(struct fib_result *res)
+#ifdef CONFIG_IP_ROUTE_CLASSID
+u32 fib_rules_tclass(const struct fib_result *res)
{
return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
}
@@ -165,7 +165,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
if (frh->dst_len)
rule4->dst = nla_get_be32(tb[FRA_DST]);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (tb[FRA_FLOW])
rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
#endif
@@ -195,7 +195,7 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
if (frh->tos && (rule4->tos != frh->tos))
return 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
return 0;
#endif
@@ -224,7 +224,7 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
if (rule4->src_len)
NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (rule4->tclassid)
NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
#endif
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 12d3dc3..562f34c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -49,7 +49,7 @@
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
-static unsigned int fib_hash_size;
+static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;
#define DEVINDEX_HASHBITS 8
@@ -152,6 +152,8 @@ static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
+ if (fi->fib_metrics != (u32 *) dst_default_metrics)
+ kfree(fi->fib_metrics);
kfree(fi);
}
@@ -200,7 +202,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->nh_weight != onh->nh_weight ||
#endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid != onh->nh_tclassid ||
#endif
((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
@@ -221,7 +223,7 @@ static inline unsigned int fib_devindex_hashfn(unsigned int val)
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
- unsigned int mask = (fib_hash_size - 1);
+ unsigned int mask = (fib_info_hash_size - 1);
unsigned int val = fi->fib_nhs;
val ^= fi->fib_protocol;
@@ -422,7 +424,7 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
@@ -476,7 +478,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla && nla_get_be32(nla) != nh->nh_gw)
return 1;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
if (nla && nla_get_u32(nla) != nh->nh_tclassid)
return 1;
@@ -613,14 +615,14 @@ out:
static inline unsigned int fib_laddr_hashfn(__be32 val)
{
- unsigned int mask = (fib_hash_size - 1);
+ unsigned int mask = (fib_info_hash_size - 1);
return ((__force u32)val ^
((__force u32)val >> 7) ^
((__force u32)val >> 14)) & mask;
}
-static struct hlist_head *fib_hash_alloc(int bytes)
+static struct hlist_head *fib_info_hash_alloc(int bytes)
{
if (bytes <= PAGE_SIZE)
return kzalloc(bytes, GFP_KERNEL);
@@ -630,7 +632,7 @@ static struct hlist_head *fib_hash_alloc(int bytes)
get_order(bytes));
}
-static void fib_hash_free(struct hlist_head *hash, int bytes)
+static void fib_info_hash_free(struct hlist_head *hash, int bytes)
{
if (!hash)
return;
@@ -641,18 +643,18 @@ static void fib_hash_free(struct hlist_head *hash, int bytes)
free_pages((unsigned long) hash, get_order(bytes));
}
-static void fib_hash_move(struct hlist_head *new_info_hash,
- struct hlist_head *new_laddrhash,
- unsigned int new_size)
+static void fib_info_hash_move(struct hlist_head *new_info_hash,
+ struct hlist_head *new_laddrhash,
+ unsigned int new_size)
{
struct hlist_head *old_info_hash, *old_laddrhash;
- unsigned int old_size = fib_hash_size;
+ unsigned int old_size = fib_info_hash_size;
unsigned int i, bytes;
spin_lock_bh(&fib_info_lock);
old_info_hash = fib_info_hash;
old_laddrhash = fib_info_laddrhash;
- fib_hash_size = new_size;
+ fib_info_hash_size = new_size;
for (i = 0; i < old_size; i++) {
struct hlist_head *head = &fib_info_hash[i];
@@ -693,8 +695,8 @@ static void fib_hash_move(struct hlist_head *new_info_hash,
spin_unlock_bh(&fib_info_lock);
bytes = old_size * sizeof(struct hlist_head *);
- fib_hash_free(old_info_hash, bytes);
- fib_hash_free(old_laddrhash, bytes);
+ fib_info_hash_free(old_info_hash, bytes);
+ fib_info_hash_free(old_laddrhash, bytes);
}
struct fib_info *fib_create_info(struct fib_config *cfg)
@@ -718,8 +720,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
#endif
err = -ENOBUFS;
- if (fib_info_cnt >= fib_hash_size) {
- unsigned int new_size = fib_hash_size << 1;
+ if (fib_info_cnt >= fib_info_hash_size) {
+ unsigned int new_size = fib_info_hash_size << 1;
struct hlist_head *new_info_hash;
struct hlist_head *new_laddrhash;
unsigned int bytes;
@@ -727,21 +729,27 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (!new_size)
new_size = 1;
bytes = new_size * sizeof(struct hlist_head *);
- new_info_hash = fib_hash_alloc(bytes);
- new_laddrhash = fib_hash_alloc(bytes);
+ new_info_hash = fib_info_hash_alloc(bytes);
+ new_laddrhash = fib_info_hash_alloc(bytes);
if (!new_info_hash || !new_laddrhash) {
- fib_hash_free(new_info_hash, bytes);
- fib_hash_free(new_laddrhash, bytes);
+ fib_info_hash_free(new_info_hash, bytes);
+ fib_info_hash_free(new_laddrhash, bytes);
} else
- fib_hash_move(new_info_hash, new_laddrhash, new_size);
+ fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
- if (!fib_hash_size)
+ if (!fib_info_hash_size)
goto failure;
}
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (fi == NULL)
goto failure;
+ if (cfg->fc_mx) {
+ fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+ if (!fi->fib_metrics)
+ goto failure;
+ } else
+ fi->fib_metrics = (u32 *) dst_default_metrics;
fib_info_cnt++;
fi->fib_net = hold_net(net);
@@ -779,7 +787,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto err_inval;
if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
goto err_inval;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
goto err_inval;
#endif
@@ -792,7 +800,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
nh->nh_oif = cfg->fc_oif;
nh->nh_gw = cfg->fc_gw;
nh->nh_flags = cfg->fc_flags;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid = cfg->fc_flow;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -881,8 +889,9 @@ failure:
}
/* Note! fib_semantic_match intentionally uses RCU list functions. */
-int fib_semantic_match(struct list_head *head, const struct flowi *flp,
- struct fib_result *res, int prefixlen, int fib_flags)
+int fib_semantic_match(struct fib_table *tb, struct list_head *head,
+ const struct flowi *flp, struct fib_result *res,
+ int prefixlen, int fib_flags)
{
struct fib_alias *fa;
int nh_sel = 0;
@@ -946,6 +955,8 @@ out_fill_res:
res->type = fa->fa_type;
res->scope = fa->fa_scope;
res->fi = fa->fa_info;
+ res->table = tb;
+ res->fa_head = head;
if (!(fib_flags & FIB_LOOKUP_NOREF))
atomic_inc(&res->fi->fib_clntref);
return 0;
@@ -1002,7 +1013,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
if (fi->fib_nh->nh_oif)
NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (fi->fib_nh[0].nh_tclassid)
NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
#endif
@@ -1027,7 +1038,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
if (nh->nh_gw)
NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (nh->nh_tclassid)
NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
#endif
@@ -1125,6 +1136,62 @@ int fib_sync_down_dev(struct net_device *dev, int force)
return ret;
}
+/* Must be invoked inside of an RCU protected region. */
+void fib_select_default(struct fib_result *res)
+{
+ struct fib_info *fi = NULL, *last_resort = NULL;
+ struct list_head *fa_head = res->fa_head;
+ struct fib_table *tb = res->table;
+ int order = -1, last_idx = -1;
+ struct fib_alias *fa;
+
+ list_for_each_entry_rcu(fa, fa_head, fa_list) {
+ struct fib_info *next_fi = fa->fa_info;
+
+ if (fa->fa_scope != res->scope ||
+ fa->fa_type != RTN_UNICAST)
+ continue;
+
+ if (next_fi->fib_priority > res->fi->fib_priority)
+ break;
+ if (!next_fi->fib_nh[0].nh_gw ||
+ next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+ continue;
+
+ fib_alias_accessed(fa);
+
+ if (fi == NULL) {
+ if (next_fi != res->fi)
+ break;
+ } else if (!fib_detect_death(fi, order, &last_resort,
+ &last_idx, tb->tb_default)) {
+ fib_result_assign(res, fi);
+ tb->tb_default = order;
+ goto out;
+ }
+ fi = next_fi;
+ order++;
+ }
+
+ if (order <= 0 || fi == NULL) {
+ tb->tb_default = -1;
+ goto out;
+ }
+
+ if (!fib_detect_death(fi, order, &last_resort, &last_idx,
+ tb->tb_default)) {
+ fib_result_assign(res, fi);
+ tb->tb_default = order;
+ goto out;
+ }
+
+ if (last_idx >= 0)
+ fib_result_assign(res, last_resort);
+ tb->tb_default = last_idx;
+out:
+ return;
+}
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/*
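Note: two things land in fib_semantics.c here. fib_select_default() becomes table-agnostic, driven by the res->table and res->fa_head that fib_semantic_match() now records; and fib_metrics becomes share-until-written against the zeroed, read-only dst_default_metrics array, with free_fib_info_rcu() freeing the metrics only when a private copy was allocated. The sharing pattern, reduced to stand-alone C (illustrative names, not the kernel's):

#include <stdlib.h>
#include <string.h>

#define RTAX_MAX 16

/* One zeroed array shared by every entry that has no explicit metrics. */
static const unsigned int default_metrics[RTAX_MAX];

struct entry {
	unsigned int *metrics;	/* shared default, or a private copy */
};

/* Attach metrics: share the default unless values were supplied. */
static int entry_init(struct entry *e, const unsigned int *mx)
{
	if (!mx) {
		e->metrics = (unsigned int *) default_metrics;
		return 0;
	}
	e->metrics = malloc(sizeof(unsigned int) * RTAX_MAX);
	if (!e->metrics)
		return -1;
	memcpy(e->metrics, mx, sizeof(unsigned int) * RTAX_MAX);
	return 0;
}

/* Free only what we own; the shared default is never freed. */
static void entry_free(struct entry *e)
{
	if (e->metrics != (unsigned int *) default_metrics)
		free(e->metrics);
}

int main(void)
{
	struct entry a, b;
	unsigned int mx[RTAX_MAX] = { [1] = 1500 };	/* e.g. an MTU metric */

	entry_init(&a, NULL);	/* shares default_metrics */
	entry_init(&b, mx);	/* gets a private copy */
	entry_free(&a);
	entry_free(&b);
	return 0;
}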
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 0f28034..edf3b09 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -95,7 +95,7 @@ typedef unsigned int t_key;
#define IS_TNODE(n) (!(n->parent & T_LEAF))
#define IS_LEAF(n) (n->parent & T_LEAF)
-struct node {
+struct rt_trie_node {
unsigned long parent;
t_key key;
};
@@ -126,7 +126,7 @@ struct tnode {
struct work_struct work;
struct tnode *tnode_free;
};
- struct node *child[0];
+ struct rt_trie_node *child[0];
};
#ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,16 +151,16 @@ struct trie_stat {
};
struct trie {
- struct node *trie;
+ struct rt_trie_node *trie;
#ifdef CONFIG_IP_FIB_TRIE_STATS
struct trie_use_stats stats;
#endif
};
-static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
int wasfull);
-static struct node *resize(struct trie *t, struct tnode *tn);
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
@@ -177,12 +177,12 @@ static const int sync_pages = 128;
static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;
-static inline struct tnode *node_parent(struct node *node)
+static inline struct tnode *node_parent(struct rt_trie_node *node)
{
return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
}
-static inline struct tnode *node_parent_rcu(struct node *node)
+static inline struct tnode *node_parent_rcu(struct rt_trie_node *node)
{
struct tnode *ret = node_parent(node);
@@ -192,22 +192,22 @@ static inline struct tnode *node_parent_rcu(struct node *node)
/* Same as rcu_assign_pointer
* but that macro() assumes that value is a pointer.
*/
-static inline void node_set_parent(struct node *node, struct tnode *ptr)
+static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
{
smp_wmb();
node->parent = (unsigned long)ptr | NODE_TYPE(node);
}
-static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i)
{
BUG_ON(i >= 1U << tn->bits);
return tn->child[i];
}
-static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
- struct node *ret = tnode_get_child(tn, i);
+ struct rt_trie_node *ret = tnode_get_child(tn, i);
return rcu_dereference_rtnl(ret);
}
@@ -217,12 +217,12 @@ static inline int tnode_child_length(const struct tnode *tn)
return 1 << tn->bits;
}
-static inline t_key mask_pfx(t_key k, unsigned short l)
+static inline t_key mask_pfx(t_key k, unsigned int l)
{
return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
}
-static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
+static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
{
if (offset < KEYLENGTH)
return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
@@ -378,7 +378,7 @@ static void __tnode_free_rcu(struct rcu_head *head)
{
struct tnode *tn = container_of(head, struct tnode, rcu);
size_t size = sizeof(struct tnode) +
- (sizeof(struct node *) << tn->bits);
+ (sizeof(struct rt_trie_node *) << tn->bits);
if (size <= PAGE_SIZE)
kfree(tn);
@@ -402,7 +402,7 @@ static void tnode_free_safe(struct tnode *tn)
tn->tnode_free = tnode_free_head;
tnode_free_head = tn;
tnode_free_size += sizeof(struct tnode) +
- (sizeof(struct node *) << tn->bits);
+ (sizeof(struct rt_trie_node *) << tn->bits);
}
static void tnode_free_flush(void)
@@ -443,7 +443,7 @@ static struct leaf_info *leaf_info_new(int plen)
static struct tnode *tnode_new(t_key key, int pos, int bits)
{
- size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
+ size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
struct tnode *tn = tnode_alloc(sz);
if (tn) {
@@ -456,7 +456,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
}
pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
- sizeof(struct node) << bits);
+ sizeof(struct rt_trie_node) << bits);
return tn;
}
@@ -465,7 +465,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
* and no bits are skipped. See discussion in dyntree paper p. 6
*/
-static inline int tnode_full(const struct tnode *tn, const struct node *n)
+static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
{
if (n == NULL || IS_LEAF(n))
return 0;
@@ -474,7 +474,7 @@ static inline int tnode_full(const struct tnode *tn, const struct node *n)
}
static inline void put_child(struct trie *t, struct tnode *tn, int i,
- struct node *n)
+ struct rt_trie_node *n)
{
tnode_put_child_reorg(tn, i, n, -1);
}
@@ -484,10 +484,10 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i,
* Update the value of full_children and empty_children.
*/
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
int wasfull)
{
- struct node *chi = tn->child[i];
+ struct rt_trie_node *chi = tn->child[i];
int isfull;
BUG_ON(i >= 1<<tn->bits);
@@ -515,7 +515,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
}
#define MAX_WORK 10
-static struct node *resize(struct trie *t, struct tnode *tn)
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
{
int i;
struct tnode *old_tn;
@@ -605,7 +605,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Keep root node larger */
- if (!node_parent((struct node *)tn)) {
+ if (!node_parent((struct rt_trie_node *)tn)) {
inflate_threshold_use = inflate_threshold_root;
halve_threshold_use = halve_threshold_root;
} else {
@@ -635,7 +635,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Return if at least one inflate is run */
if (max_work != MAX_WORK)
- return (struct node *) tn;
+ return (struct rt_trie_node *) tn;
/*
* Halve as long as the number of empty children in this
@@ -663,7 +663,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
if (tn->empty_children == tnode_child_length(tn) - 1) {
one_child:
for (i = 0; i < tnode_child_length(tn); i++) {
- struct node *n;
+ struct rt_trie_node *n;
n = tn->child[i];
if (!n)
@@ -676,7 +676,7 @@ one_child:
return n;
}
}
- return (struct node *) tn;
+ return (struct rt_trie_node *) tn;
}
static struct tnode *inflate(struct trie *t, struct tnode *tn)
@@ -723,14 +723,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
goto nomem;
}
- put_child(t, tn, 2*i, (struct node *) left);
- put_child(t, tn, 2*i+1, (struct node *) right);
+ put_child(t, tn, 2*i, (struct rt_trie_node *) left);
+ put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
}
}
for (i = 0; i < olen; i++) {
struct tnode *inode;
- struct node *node = tnode_get_child(oldtnode, i);
+ struct rt_trie_node *node = tnode_get_child(oldtnode, i);
struct tnode *left, *right;
int size, j;
@@ -825,7 +825,7 @@ nomem:
static struct tnode *halve(struct trie *t, struct tnode *tn)
{
struct tnode *oldtnode = tn;
- struct node *left, *right;
+ struct rt_trie_node *left, *right;
int i;
int olen = tnode_child_length(tn);
@@ -856,7 +856,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
if (!newn)
goto nomem;
- put_child(t, tn, i/2, (struct node *)newn);
+ put_child(t, tn, i/2, (struct rt_trie_node *)newn);
}
}
@@ -958,7 +958,7 @@ fib_find_node(struct trie *t, u32 key)
{
int pos;
struct tnode *tn;
- struct node *n;
+ struct rt_trie_node *n;
pos = 0;
n = rcu_dereference_rtnl(t->trie);
@@ -993,17 +993,17 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
key = tn->key;
- while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
+ while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
tn = (struct tnode *) resize(t, (struct tnode *)tn);
tnode_put_child_reorg((struct tnode *)tp, cindex,
- (struct node *)tn, wasfull);
+ (struct rt_trie_node *)tn, wasfull);
- tp = node_parent((struct node *) tn);
+ tp = node_parent((struct rt_trie_node *) tn);
if (!tp)
- rcu_assign_pointer(t->trie, (struct node *)tn);
+ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
if (!tp)
@@ -1015,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
if (IS_TNODE(tn))
tn = (struct tnode *)resize(t, (struct tnode *)tn);
- rcu_assign_pointer(t->trie, (struct node *)tn);
+ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
}
@@ -1025,7 +1025,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
{
int pos, newpos;
struct tnode *tp = NULL, *tn = NULL;
- struct node *n;
+ struct rt_trie_node *n;
struct leaf *l;
int missbit;
struct list_head *fa_head = NULL;
@@ -1111,10 +1111,10 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
if (t->trie && n == NULL) {
/* Case 2: n is NULL, and will just insert a new leaf */
- node_set_parent((struct node *)l, tp);
+ node_set_parent((struct rt_trie_node *)l, tp);
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
+ put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
} else {
/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
/*
@@ -1141,18 +1141,18 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
return NULL;
}
- node_set_parent((struct node *)tn, tp);
+ node_set_parent((struct rt_trie_node *)tn, tp);
missbit = tkey_extract_bits(key, newpos, 1);
- put_child(t, tn, missbit, (struct node *)l);
+ put_child(t, tn, missbit, (struct rt_trie_node *)l);
put_child(t, tn, 1-missbit, n);
if (tp) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
put_child(t, (struct tnode *)tp, cindex,
- (struct node *)tn);
+ (struct rt_trie_node *)tn);
} else {
- rcu_assign_pointer(t->trie, (struct node *)tn);
+ rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tp = tn;
}
}
@@ -1340,7 +1340,7 @@ err:
}
/* should be called with rcu_read_lock */
-static int check_leaf(struct trie *t, struct leaf *l,
+static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
t_key key, const struct flowi *flp,
struct fib_result *res, int fib_flags)
{
@@ -1356,7 +1356,7 @@ static int check_leaf(struct trie *t, struct leaf *l,
if (l->key != (key & ntohl(mask)))
continue;
- err = fib_semantic_match(&li->falh, flp, res, plen, fib_flags);
+ err = fib_semantic_match(tb, &li->falh, flp, res, plen, fib_flags);
#ifdef CONFIG_IP_FIB_TRIE_STATS
if (err <= 0)
@@ -1376,13 +1376,13 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
{
struct trie *t = (struct trie *) tb->tb_data;
int ret;
- struct node *n;
+ struct rt_trie_node *n;
struct tnode *pn;
- int pos, bits;
+ unsigned int pos, bits;
t_key key = ntohl(flp->fl4_dst);
- int chopped_off;
+ unsigned int chopped_off;
t_key cindex = 0;
- int current_prefix_length = KEYLENGTH;
+ unsigned int current_prefix_length = KEYLENGTH;
struct tnode *cn;
t_key pref_mismatch;
@@ -1398,7 +1398,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
/* Just a leaf? */
if (IS_LEAF(n)) {
- ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+ ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
goto found;
}
@@ -1423,7 +1423,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
}
if (IS_LEAF(n)) {
- ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+ ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
if (ret > 0)
goto backtrace;
goto found;
@@ -1541,7 +1541,7 @@ backtrace:
if (chopped_off <= pn->bits) {
cindex &= ~(1 << (chopped_off-1));
} else {
- struct tnode *parent = node_parent_rcu((struct node *) pn);
+ struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
if (!parent)
goto failed;
@@ -1568,7 +1568,7 @@ found:
*/
static void trie_leaf_remove(struct trie *t, struct leaf *l)
{
- struct tnode *tp = node_parent((struct node *) l);
+ struct tnode *tp = node_parent((struct rt_trie_node *) l);
pr_debug("entering trie_leaf_remove(%p)\n", l);
@@ -1706,7 +1706,7 @@ static int trie_flush_leaf(struct leaf *l)
* Scan for the next right leaf starting at node p->child[idx]
* Since we have back pointer, no recursion necessary.
*/
-static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
+static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
{
do {
t_key idx;
@@ -1732,7 +1732,7 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
}
/* Node empty, walk back up to parent */
- c = (struct node *) p;
+ c = (struct rt_trie_node *) p;
} while ((p = node_parent_rcu(c)) != NULL);
return NULL; /* Root of trie */
@@ -1753,7 +1753,7 @@ static struct leaf *trie_firstleaf(struct trie *t)
static struct leaf *trie_nextleaf(struct leaf *l)
{
- struct node *c = (struct node *) l;
+ struct rt_trie_node *c = (struct rt_trie_node *) l;
struct tnode *p = node_parent_rcu(c);
if (!p)
@@ -1802,80 +1802,6 @@ void fib_free_table(struct fib_table *tb)
kfree(tb);
}
-void fib_table_select_default(struct fib_table *tb,
- const struct flowi *flp,
- struct fib_result *res)
-{
- struct trie *t = (struct trie *) tb->tb_data;
- int order, last_idx;
- struct fib_info *fi = NULL;
- struct fib_info *last_resort;
- struct fib_alias *fa = NULL;
- struct list_head *fa_head;
- struct leaf *l;
-
- last_idx = -1;
- last_resort = NULL;
- order = -1;
-
- rcu_read_lock();
-
- l = fib_find_node(t, 0);
- if (!l)
- goto out;
-
- fa_head = get_fa_head(l, 0);
- if (!fa_head)
- goto out;
-
- if (list_empty(fa_head))
- goto out;
-
- list_for_each_entry_rcu(fa, fa_head, fa_list) {
- struct fib_info *next_fi = fa->fa_info;
-
- if (fa->fa_scope != res->scope ||
- fa->fa_type != RTN_UNICAST)
- continue;
-
- if (next_fi->fib_priority > res->fi->fib_priority)
- break;
- if (!next_fi->fib_nh[0].nh_gw ||
- next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
- continue;
-
- fib_alias_accessed(fa);
-
- if (fi == NULL) {
- if (next_fi != res->fi)
- break;
- } else if (!fib_detect_death(fi, order, &last_resort,
- &last_idx, tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
- fi = next_fi;
- order++;
- }
- if (order <= 0 || fi == NULL) {
- tb->tb_default = -1;
- goto out;
- }
-
- if (!fib_detect_death(fi, order, &last_resort, &last_idx,
- tb->tb_default)) {
- fib_result_assign(res, fi);
- tb->tb_default = order;
- goto out;
- }
- if (last_idx >= 0)
- fib_result_assign(res, last_resort);
- tb->tb_default = last_idx;
-out:
- rcu_read_unlock();
-}
-
static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
struct fib_table *tb,
struct sk_buff *skb, struct netlink_callback *cb)
@@ -1990,7 +1916,7 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
return skb->len;
}
-void __init fib_hash_init(void)
+void __init fib_trie_init(void)
{
fn_alias_kmem = kmem_cache_create("ip_fib_alias",
sizeof(struct fib_alias),
@@ -2003,8 +1929,7 @@ void __init fib_hash_init(void)
}
-/* Fix more generic FIB names for init later */
-struct fib_table *fib_hash_table(u32 id)
+struct fib_table *fib_trie_table(u32 id)
{
struct fib_table *tb;
struct trie *t;
@@ -2036,7 +1961,7 @@ struct fib_trie_iter {
unsigned int depth;
};
-static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
{
struct tnode *tn = iter->tnode;
unsigned int cindex = iter->index;
@@ -2050,7 +1975,7 @@ static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
iter->tnode, iter->index, iter->depth);
rescan:
while (cindex < (1<<tn->bits)) {
- struct node *n = tnode_get_child_rcu(tn, cindex);
+ struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
if (n) {
if (IS_LEAF(n)) {
@@ -2069,7 +1994,7 @@ rescan:
}
/* Current node exhausted, pop back up */
- p = node_parent_rcu((struct node *)tn);
+ p = node_parent_rcu((struct rt_trie_node *)tn);
if (p) {
cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
tn = p;
@@ -2081,10 +2006,10 @@ rescan:
return NULL;
}
-static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
+static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
struct trie *t)
{
- struct node *n;
+ struct rt_trie_node *n;
if (!t)
return NULL;
@@ -2108,7 +2033,7 @@ static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
static void trie_collect_stats(struct trie *t, struct trie_stat *s)
{
- struct node *n;
+ struct rt_trie_node *n;
struct fib_trie_iter iter;
memset(s, 0, sizeof(*s));
@@ -2181,7 +2106,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
seq_putc(seq, '\n');
seq_printf(seq, "\tPointers: %u\n", pointers);
- bytes += sizeof(struct node *) * pointers;
+ bytes += sizeof(struct rt_trie_node *) * pointers;
seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024);
}
@@ -2262,7 +2187,7 @@ static const struct file_operations fib_triestat_fops = {
.release = single_release_net,
};
-static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
{
struct fib_trie_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
@@ -2275,7 +2200,7 @@ static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
struct fib_table *tb;
hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
- struct node *n;
+ struct rt_trie_node *n;
for (n = fib_trie_get_first(iter,
(struct trie *) tb->tb_data);
@@ -2304,7 +2229,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
struct fib_table *tb = iter->tb;
struct hlist_node *tb_node;
unsigned int h;
- struct node *n;
+ struct rt_trie_node *n;
++*pos;
/* next node in same table */
@@ -2390,7 +2315,7 @@ static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
static int fib_trie_seq_show(struct seq_file *seq, void *v)
{
const struct fib_trie_iter *iter = seq->private;
- struct node *n = v;
+ struct rt_trie_node *n = v;
if (!node_parent_rcu(n))
fib_table_print(seq, iter->tb);
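Note: the signedness changes above (int to unsigned int for pos, bits, offset and the lookup locals) do not alter the bit arithmetic. As a reference point, tkey_extract_bits() in user-space form, with KEYLENGTH = 32 as for IPv4 keys:

#include <stdio.h>
#include <stdint.h>

#define KEYLENGTH 32

/* Take 'bits' bits out of key 'a', starting 'offset' bits from the MSB. */
static uint32_t tkey_extract_bits(uint32_t a, unsigned int offset,
				  unsigned int bits)
{
	if (offset < KEYLENGTH)
		return (a << offset) >> (KEYLENGTH - bits);
	return 0;
}

int main(void)
{
	uint32_t key = 0xC0A80101;	/* 192.168.1.1 in host order */

	/* Top 8 bits -> 192 (0xC0), next 8 bits -> 168 (0xA8). */
	printf("%u %u\n", tkey_extract_bits(key, 0, 8),
	       tkey_extract_bits(key, 8, 8));
	return 0;
}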
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 4aa1b7f..ad2bcf1 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -233,48 +233,11 @@ static inline void icmp_xmit_unlock(struct sock *sk)
* Send an ICMP frame.
*/
-/*
- * Check transmit rate limitation for given message.
- * The rate information is held in the destination cache now.
- * This function is generic and could be used for other purposes
- * too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
- *
- * Note that the same dst_entry fields are modified by functions in
- * route.c too, but these work for packet destinations while xrlim_allow
- * works for icmp destinations. This means the rate limiting information
- * for one "ip object" is shared - and these ICMPs are twice limited:
- * by source and by destination.
- *
- * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
- * SHOULD allow setting of rate limits
- *
- * Shared between ICMPv4 and ICMPv6.
- */
-#define XRLIM_BURST_FACTOR 6
-int xrlim_allow(struct dst_entry *dst, int timeout)
-{
- unsigned long now, token = dst->rate_tokens;
- int rc = 0;
-
- now = jiffies;
- token += now - dst->rate_last;
- dst->rate_last = now;
- if (token > XRLIM_BURST_FACTOR * timeout)
- token = XRLIM_BURST_FACTOR * timeout;
- if (token >= timeout) {
- token -= timeout;
- rc = 1;
- }
- dst->rate_tokens = token;
- return rc;
-}
-EXPORT_SYMBOL(xrlim_allow);
-
-static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
int type, int code)
{
struct dst_entry *dst = &rt->dst;
- int rc = 1;
+ bool rc = true;
if (type > NR_ICMP_TYPES)
goto out;
@@ -288,8 +251,12 @@ static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
goto out;
/* Limit if icmp type is enabled in ratemask. */
- if ((1 << type) & net->ipv4.sysctl_icmp_ratemask)
- rc = xrlim_allow(dst, net->ipv4.sysctl_icmp_ratelimit);
+ if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+ rc = inet_peer_xrlim_allow(rt->peer,
+ net->ipv4.sysctl_icmp_ratelimit);
+ }
out:
return rc;
}
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index a96e656..48f8d45 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -167,9 +167,9 @@ static int addr_compare(const struct inetpeer_addr *a,
int i, n = (a->family == AF_INET ? 1 : 4);
for (i = 0; i < n; i++) {
- if (a->a6[i] == b->a6[i])
+ if (a->addr.a6[i] == b->addr.a6[i])
continue;
- if (a->a6[i] < b->a6[i])
+ if (a->addr.a6[i] < b->addr.a6[i])
return -1;
return 1;
}
@@ -510,8 +510,13 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
p->daddr = *daddr;
atomic_set(&p->refcnt, 1);
atomic_set(&p->rid, 0);
- atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
+ atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
p->tcp_ts_stamp = 0;
+ p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+ p->rate_tokens = 0;
+ p->rate_last = 0;
+ p->pmtu_expires = 0;
+ memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
INIT_LIST_HEAD(&p->unused);
@@ -579,3 +584,44 @@ void inet_putpeer(struct inet_peer *p)
local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_putpeer);
+
+/*
+ * Check transmit rate limitation for given message.
+ * The rate information is held in the inet_peer entries now.
+ * This function is generic and could be used for other purposes
+ * too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
+ *
+ * Note that the same inet_peer fields are modified by functions in
+ * route.c too, but these work for packet destinations while xrlim_allow
+ * works for icmp destinations. This means the rate limiting information
+ * for one "ip object" is shared - and these ICMPs are twice limited:
+ * by source and by destination.
+ *
+ * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
+ * SHOULD allow setting of rate limits
+ *
+ * Shared between ICMPv4 and ICMPv6.
+ */
+#define XRLIM_BURST_FACTOR 6
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+{
+ unsigned long now, token;
+ bool rc = false;
+
+ if (!peer)
+ return true;
+
+ token = peer->rate_tokens;
+ now = jiffies;
+ token += now - peer->rate_last;
+ peer->rate_last = now;
+ if (token > XRLIM_BURST_FACTOR * timeout)
+ token = XRLIM_BURST_FACTOR * timeout;
+ if (token >= timeout) {
+ token -= timeout;
+ rc = true;
+ }
+ peer->rate_tokens = token;
+ return rc;
+}
+EXPORT_SYMBOL(inet_peer_xrlim_allow);
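Note: this is the old xrlim_allow() essentially verbatim, re-homed onto inet_peer and given a bool return; a NULL peer now fails open (returns true), so a peer allocation failure never silences ICMP. Worked through in plain numbers under HZ = 1000 and timeout = 1000 jiffies: an idle peer banks one token per jiffy up to 6 * timeout, and each allowed message spends timeout tokens, so a long-idle peer can burst six messages and then settles to one per second. A stand-alone rendering of that arithmetic:

#include <stdbool.h>
#include <stdio.h>

#define XRLIM_BURST_FACTOR 6

struct peer {
	unsigned long rate_tokens;
	unsigned long rate_last;
};

/* Token bucket: tokens grow with elapsed time, capped at 6*timeout;
 * each allowed transmission costs 'timeout' tokens. */
static bool xrlim_allow(struct peer *p, unsigned long now, int timeout)
{
	unsigned long token = p->rate_tokens + (now - p->rate_last);
	bool rc = false;

	p->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * (unsigned long) timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= (unsigned long) timeout) {
		token -= timeout;
		rc = true;
	}
	p->rate_tokens = token;
	return rc;
}

int main(void)
{
	struct peer p = { 0, 0 };
	int i, sent = 0;

	/* After 10s idle (HZ=1000): six back-to-back sends pass, then none. */
	for (i = 0; i < 10; i++)
		sent += xrlim_allow(&p, 10000, 1000);
	printf("burst allowed: %d\n", sent);	/* prints 6 */
	return 0;
}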
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d859bcc..d7b2b09 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -340,7 +340,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
}
}
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (unlikely(skb_dst(skb)->tclassid)) {
struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
u32 idx = skb_dst(skb)->tclassid;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index babd1a2..f926a31 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -206,8 +206,9 @@ config IP_NF_TARGET_REDIRECT
config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
- depends on NF_NAT
+ depends on NF_CONNTRACK_SNMP && NF_NAT
depends on NETFILTER_ADVANCED
+ default NF_NAT && NF_CONNTRACK_SNMP
---help---
This module implements an Application Layer Gateway (ALG) for
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e855fff..e95054c 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -866,6 +866,7 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ xt_compat_init_offsets(NFPROTO_ARP, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
@@ -1333,6 +1334,7 @@ static int translate_compat_table(const char *name,
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(NFPROTO_ARP);
+ xt_compat_init_offsets(NFPROTO_ARP, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 652efea..ef7d7b9 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1063,6 +1063,7 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ xt_compat_init_offsets(AF_INET, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
@@ -1664,6 +1665,7 @@ translate_compat_table(struct net *net,
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET);
+ xt_compat_init_offsets(AF_INET, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 1e26a48..403ca57 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -300,13 +300,8 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
* that the ->target() function isn't called after ->destroy() */
ct = nf_ct_get(skb, &ctinfo);
- if (ct == NULL) {
- pr_info("no conntrack!\n");
- /* FIXME: need to drop invalid ones, since replies
- * to outgoing connections of other nodes will be
- * marked as INVALID */
+ if (ct == NULL)
return NF_DROP;
- }
/* special case: ICMP error handling. conntrack distinguishes between
* error messages (RELATED) and information requests (see below) */
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 72ffc8f..d76d6c9 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -442,8 +442,7 @@ ipt_log_packet(u_int8_t pf,
}
#endif
- /* MAC logging for input path only. */
- if (in && !out)
+ if (in != NULL)
dump_mac_header(m, loginfo, skb);
dump_packet(m, loginfo, skb, 0);
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 294a2a3..aef5d1f 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -60,7 +60,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
dev_net(out)->ipv4.iptable_mangle);
/* Reroute for ANY change. */
- if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
+ if (ret != NF_DROP && ret != NF_STOLEN) {
iph = ip_hdr(skb);
if (iph->saddr != saddr ||
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 63f60fc..5585980 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -20,6 +20,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>
+#include <linux/rculist_nulls.h>
struct ct_iter_state {
struct seq_net_private p;
@@ -35,7 +36,8 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
for (st->bucket = 0;
st->bucket < net->ct.htable_size;
st->bucket++) {
- n = rcu_dereference(net->ct.hash[st->bucket].first);
+ n = rcu_dereference(
+ hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
if (!is_a_nulls(n))
return n;
}
@@ -48,13 +50,14 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_nulls_next_rcu(head));
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
if (++st->bucket >= net->ct.htable_size)
return NULL;
}
- head = rcu_dereference(net->ct.hash[st->bucket].first);
+ head = rcu_dereference(
+ hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
}
return head;
}
@@ -217,7 +220,8 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
struct hlist_node *n;
for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
- n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ n = rcu_dereference(
+ hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
if (n)
return n;
}
@@ -230,11 +234,12 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_expect_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_next_rcu(head));
while (head == NULL) {
if (++st->bucket >= nf_ct_expect_hsize)
return NULL;
- head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ head = rcu_dereference(
+ hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
}
return head;
}
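Note: ct_get_first()/ct_get_next() switch from raw ->first/->next loads to the hlist_nulls_first_rcu()/hlist_nulls_next_rcu() accessors so the __rcu-annotated pointers pass sparse checking; the traversal itself is unchanged. For readers unfamiliar with nulls lists, the sentinel encoding they rely on, in a minimal stand-alone rendering matching <linux/list_nulls.h>:

#include <stdio.h>

/* A nulls-terminated chain ends in an encoded "nulls" value rather than
 * NULL, so an RCU reader that lands on a foreign chain (the entry moved
 * buckets mid-walk) sees the wrong bucket id in the sentinel and restarts. */
struct hlist_nulls_node { struct hlist_nulls_node *next; };

static int is_a_nulls(const struct hlist_nulls_node *p)
{
	return (unsigned long) p & 1;		/* low bit tags the sentinel */
}

static unsigned long get_nulls_value(const struct hlist_nulls_node *p)
{
	return (unsigned long) p >> 1;		/* bucket id stored above it */
}

int main(void)
{
	/* Bucket 5's sentinel: (5 << 1) | 1. */
	struct hlist_nulls_node *end =
		(struct hlist_nulls_node *) ((5UL << 1) | 1);

	printf("%d %lu\n", is_a_nulls(end), get_nulls_value(end));	/* 1 5 */
	return 0;
}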
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 0f23b3f..703f366f 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -44,13 +44,13 @@ static unsigned int help(struct sk_buff *skb,
/* Try to get same port: if not, try to change it. */
for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
- int ret;
+ int res;
exp->tuple.dst.u.tcp.port = htons(port);
- ret = nf_ct_expect_related(exp);
- if (ret == 0)
+ res = nf_ct_expect_related(exp);
+ if (res == 0)
break;
- else if (ret != -EBUSY) {
+ else if (res != -EBUSY) {
port = 0;
break;
}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index c04787c..21bcf47 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -221,7 +221,14 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
manips not an issue. */
if (maniptype == IP_NAT_MANIP_SRC &&
!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
- if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
+ /* try the original tuple first */
+ if (in_range(orig_tuple, range)) {
+ if (!nf_nat_used_tuple(orig_tuple, ct)) {
+ *tuple = *orig_tuple;
+ return;
+ }
+ } else if (find_appropriate_src(net, zone, orig_tuple, tuple,
+ range)) {
pr_debug("get_unique_tuple: Found current src map\n");
if (!nf_nat_used_tuple(tuple, ct))
return;
@@ -266,7 +273,6 @@ nf_nat_setup_info(struct nf_conn *ct,
struct net *net = nf_ct_net(ct);
struct nf_conntrack_tuple curr_tuple, new_tuple;
struct nf_conn_nat *nat;
- int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
/* nat helper or nfctnetlink also setup binding */
nat = nfct_nat(ct);
@@ -306,8 +312,7 @@ nf_nat_setup_info(struct nf_conn *ct,
ct->status |= IPS_DST_NAT;
}
- /* Place in source hash if this is the first time. */
- if (have_to_hash) {
+ if (maniptype == IP_NAT_MANIP_SRC) {
unsigned int srchash;
srchash = hash_by_src(net, nf_ct_zone(ct),
@@ -323,9 +328,9 @@ nf_nat_setup_info(struct nf_conn *ct,
/* It's done. */
if (maniptype == IP_NAT_MANIP_DST)
- set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+ ct->status |= IPS_DST_NAT_DONE;
else
- set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+ ct->status |= IPS_SRC_NAT_DONE;
return NF_ACCEPT;
}
@@ -502,7 +507,10 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
int ret = 0;
spin_lock_bh(&nf_nat_lock);
- if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
+ if (rcu_dereference_protected(
+ nf_nat_protos[proto->protonum],
+ lockdep_is_held(&nf_nat_lock)
+ ) != &nf_nat_unknown_protocol) {
ret = -EBUSY;
goto out;
}
@@ -532,7 +540,7 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
if (nat == NULL || nat->ct == NULL)
return;
- NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);
+ NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);
spin_lock_bh(&nf_nat_lock);
hlist_del_rcu(&nat->bysource);
@@ -545,11 +553,10 @@ static void nf_nat_move_storage(void *new, void *old)
struct nf_conn_nat *old_nat = old;
struct nf_conn *ct = old_nat->ct;
- if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
+ if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
return;
spin_lock_bh(&nf_nat_lock);
- new_nat->ct = ct;
hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
spin_unlock_bh(&nf_nat_lock);
}
@@ -679,8 +686,7 @@ static int __net_init nf_nat_net_init(struct net *net)
{
/* Leave them the same for the moment. */
net->ipv4.nat_htable_size = net->ct.htable_size;
- net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
- &net->ipv4.nat_vmalloced, 0);
+ net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
if (!net->ipv4.nat_bysource)
return -ENOMEM;
return 0;
@@ -702,8 +708,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
{
nf_ct_iterate_cleanup(net, &clean_nat, NULL);
synchronize_rcu();
- nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
- net->ipv4.nat_htable_size);
+ nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
}
static struct pernet_operations nf_nat_net_ops = {
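Note: get_unique_tuple() gains a fast path: if the packet's original source tuple already falls inside the requested NAT range and is unused, it is kept as-is instead of consulting the by-source hash or allocating a fresh mapping. The selection order as a sketch — the predicates are hypothetical stand-ins for in_range(), nf_nat_used_tuple() and find_appropriate_src():

#include <stdio.h>

enum pick { KEEP_ORIGINAL, REUSE_EXISTING_MAP, ALLOCATE_NEW };

/* Mirror of the SNAT decision order after this change (flags, not kernel API). */
static enum pick choose_src(int orig_in_range, int orig_in_use,
			    int have_map, int map_in_use)
{
	if (orig_in_range && !orig_in_use)
		return KEEP_ORIGINAL;		/* new fast path */
	if (have_map && !map_in_use)
		return REUSE_EXISTING_MAP;	/* find_appropriate_src() hit */
	return ALLOCATE_NEW;			/* remap via the protocol */
}

int main(void)
{
	/* Original source already acceptable and free: no rewrite needed. */
	printf("%d\n", choose_src(1, 0, 1, 0) == KEEP_ORIGINAL);
	return 0;
}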
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index ee5f419..8812a02 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -54,6 +54,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_snmp.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
@@ -1310,9 +1311,9 @@ static int __init nf_nat_snmp_basic_init(void)
{
int ret = 0;
- ret = nf_conntrack_helper_register(&snmp_helper);
- if (ret < 0)
- return ret;
+ BUG_ON(nf_nat_snmp_hook != NULL);
+ rcu_assign_pointer(nf_nat_snmp_hook, help);
+
ret = nf_conntrack_helper_register(&snmp_trap_helper);
if (ret < 0) {
nf_conntrack_helper_unregister(&snmp_helper);
@@ -1323,7 +1324,7 @@ static int __init nf_nat_snmp_basic_init(void)
static void __exit nf_nat_snmp_basic_fini(void)
{
- nf_conntrack_helper_unregister(&snmp_helper);
+ rcu_assign_pointer(nf_nat_snmp_hook, NULL);
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
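Note: registration now means publishing the help() callback through the nf_nat_snmp_hook pointer (read under rcu_read_lock() on the conntrack side) rather than installing a second conntrack helper. The hook-pointer pattern, sketched with C11 atomics standing in for rcu_assign_pointer()/rcu_dereference() — illustrative names, not kernel API:

#include <stdatomic.h>
#include <stddef.h>

/* Module-supplied hook; NULL while the module is absent.  Callers load it
 * once and call through the snapshot, mirroring rcu_dereference() usage. */
typedef int (*snmp_hook_t)(void *pkt);

static _Atomic(snmp_hook_t) snmp_hook;

static void hook_register(snmp_hook_t fn)	/* rcu_assign_pointer(...) */
{
	atomic_store_explicit(&snmp_hook, fn, memory_order_release);
}

static void hook_unregister(void)		/* rcu_assign_pointer(..., NULL) */
{
	atomic_store_explicit(&snmp_hook, NULL, memory_order_release);
	/* the kernel would synchronize_rcu() before freeing module state */
}

static int hook_call(void *pkt)			/* caller under rcu_read_lock() */
{
	snmp_hook_t fn = atomic_load_explicit(&snmp_hook,
					      memory_order_acquire);
	return fn ? fn(pkt) : 0;
}

static int say_hi(void *pkt) { (void) pkt; return 1; }

int main(void)
{
	hook_register(say_hi);
	return hook_call(NULL) ? 0 : 1;	/* hook present -> exit 0 */
}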
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6ed6603..52b077d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -131,9 +131,6 @@ static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
-static struct delayed_work expires_work;
-static unsigned long expires_ljiffies;
-
/*
* Interface to generic destination cache.
*/
@@ -152,6 +149,41 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
{
}
+static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+ struct rtable *rt = (struct rtable *) dst;
+ struct inet_peer *peer;
+ u32 *p = NULL;
+
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+
+ peer = rt->peer;
+ if (peer) {
+ u32 *old_p = __DST_METRICS_PTR(old);
+ unsigned long prev, new;
+
+ p = peer->metrics;
+ if (inet_metrics_new(peer))
+ memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+ new = (unsigned long) p;
+ prev = cmpxchg(&dst->_metrics, old, new);
+
+ if (prev != old) {
+ p = __DST_METRICS_PTR(prev);
+ if (prev & DST_METRICS_READ_ONLY)
+ p = NULL;
+ } else {
+ if (rt->fi) {
+ fib_info_put(rt->fi);
+ rt->fi = NULL;
+ }
+ }
+ }
+ return p;
+}
+
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
@@ -159,6 +191,7 @@ static struct dst_ops ipv4_dst_ops = {
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
.default_mtu = ipv4_default_mtu,
+ .cow_metrics = ipv4_cow_metrics,
.destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
.negative_advice = ipv4_negative_advice,
@@ -514,7 +547,7 @@ static const struct file_operations rt_cpu_seq_fops = {
.release = seq_release,
};
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
struct ip_rt_acct *dst, *src;
@@ -567,14 +600,14 @@ static int __net_init ip_rt_do_proc_init(struct net *net)
if (!pde)
goto err2;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
if (!pde)
goto err3;
#endif
return 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
@@ -588,7 +621,7 @@ static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
remove_proc_entry("rt_cache", net->proc_net_stat);
remove_proc_entry("rt_cache", net->proc_net);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
remove_proc_entry("rt_acct", net->proc_net);
#endif
}
@@ -632,7 +665,7 @@ static inline int rt_fast_clean(struct rtable *rth)
static inline int rt_valuable(struct rtable *rth)
{
return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
- rth->dst.expires;
+ (rth->peer && rth->peer->pmtu_expires);
}
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -643,13 +676,7 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t
if (atomic_read(&rth->dst.__refcnt))
goto out;
- ret = 1;
- if (rth->dst.expires &&
- time_after_eq(jiffies, rth->dst.expires))
- goto out;
-
age = jiffies - rth->dst.lastuse;
- ret = 0;
if ((age <= tmo1 && !rt_fast_clean(rth)) ||
(age <= tmo2 && rt_valuable(rth)))
goto out;
@@ -793,97 +820,6 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
return ONE;
}
-static void rt_check_expire(void)
-{
- static unsigned int rover;
- unsigned int i = rover, goal;
- struct rtable *rth;
- struct rtable __rcu **rthp;
- unsigned long samples = 0;
- unsigned long sum = 0, sum2 = 0;
- unsigned long delta;
- u64 mult;
-
- delta = jiffies - expires_ljiffies;
- expires_ljiffies = jiffies;
- mult = ((u64)delta) << rt_hash_log;
- if (ip_rt_gc_timeout > 1)
- do_div(mult, ip_rt_gc_timeout);
- goal = (unsigned int)mult;
- if (goal > rt_hash_mask)
- goal = rt_hash_mask + 1;
- for (; goal > 0; goal--) {
- unsigned long tmo = ip_rt_gc_timeout;
- unsigned long length;
-
- i = (i + 1) & rt_hash_mask;
- rthp = &rt_hash_table[i].chain;
-
- if (need_resched())
- cond_resched();
-
- samples++;
-
- if (rcu_dereference_raw(*rthp) == NULL)
- continue;
- length = 0;
- spin_lock_bh(rt_hash_lock_addr(i));
- while ((rth = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
- prefetch(rth->dst.rt_next);
- if (rt_is_expired(rth)) {
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- continue;
- }
- if (rth->dst.expires) {
- /* Entry is expired even if it is in use */
- if (time_before_eq(jiffies, rth->dst.expires)) {
-nofree:
- tmo >>= 1;
- rthp = &rth->dst.rt_next;
- /*
- * We only count entries on
- * a chain with equal hash inputs once
- * so that entries for different QOS
- * levels, and other non-hash input
- * attributes don't unfairly skew
- * the length computation
- */
- length += has_noalias(rt_hash_table[i].chain, rth);
- continue;
- }
- } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
- goto nofree;
-
- /* Cleanup aged off entries. */
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- }
- spin_unlock_bh(rt_hash_lock_addr(i));
- sum += length;
- sum2 += length*length;
- }
- if (samples) {
- unsigned long avg = sum / samples;
- unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
- rt_chain_length_max = max_t(unsigned long,
- ip_rt_gc_elasticity,
- (avg + 4*sd) >> FRACT_BITS);
- }
- rover = i;
-}
-
-/*
- * rt_worker_func() is run in process context.
- * we call rt_check_expire() to scan part of the hash table
- */
-static void rt_worker_func(struct work_struct *work)
-{
- rt_check_expire();
- schedule_delayed_work(&expires_work, ip_rt_gc_interval);
-}
-
/*
* Perturbation of rt_genid by a small quantity [1..256]
* Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
@@ -1272,6 +1208,13 @@ skip_hashing:
return 0;
}
+static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt_peer_genid(void)
+{
+ return atomic_read(&__rt_peer_genid);
+}
+
void rt_bind_peer(struct rtable *rt, int create)
{
struct inet_peer *peer;
@@ -1280,6 +1223,8 @@ void rt_bind_peer(struct rtable *rt, int create)
if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
inet_putpeer(peer);
+ else
+ rt->rt_peer_genid = rt_peer_genid();
}
/*
@@ -1349,13 +1294,8 @@ static void rt_del(unsigned hash, struct rtable *rt)
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
__be32 saddr, struct net_device *dev)
{
- int i, k;
struct in_device *in_dev = __in_dev_get_rcu(dev);
- struct rtable *rth;
- struct rtable __rcu **rthp;
- __be32 skeys[2] = { saddr, 0 };
- int ikeys[2] = { dev->ifindex, 0 };
- struct netevent_redirect netevent;
+ struct inet_peer *peer;
struct net *net;
if (!in_dev)
@@ -1367,9 +1307,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
ipv4_is_zeronet(new_gw))
goto reject_redirect;
- if (!rt_caching(net))
- goto reject_redirect;
-
if (!IN_DEV_SHARED_MEDIA(in_dev)) {
if (!inet_addr_onlink(in_dev, new_gw, old_gw))
goto reject_redirect;
@@ -1380,91 +1317,13 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
goto reject_redirect;
}
- for (i = 0; i < 2; i++) {
- for (k = 0; k < 2; k++) {
- unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
- rt_genid(net));
-
- rthp = &rt_hash_table[hash].chain;
-
- while ((rth = rcu_dereference(*rthp)) != NULL) {
- struct rtable *rt;
-
- if (rth->fl.fl4_dst != daddr ||
- rth->fl.fl4_src != skeys[i] ||
- rth->fl.oif != ikeys[k] ||
- rt_is_input_route(rth) ||
- rt_is_expired(rth) ||
- !net_eq(dev_net(rth->dst.dev), net)) {
- rthp = &rth->dst.rt_next;
- continue;
- }
-
- if (rth->rt_dst != daddr ||
- rth->rt_src != saddr ||
- rth->dst.error ||
- rth->rt_gateway != old_gw ||
- rth->dst.dev != dev)
- break;
-
- dst_hold(&rth->dst);
-
- rt = dst_alloc(&ipv4_dst_ops);
- if (rt == NULL) {
- ip_rt_put(rth);
- return;
- }
-
- /* Copy all the information. */
- *rt = *rth;
- rt->dst.__use = 1;
- atomic_set(&rt->dst.__refcnt, 1);
- rt->dst.child = NULL;
- if (rt->dst.dev)
- dev_hold(rt->dst.dev);
- rt->dst.obsolete = -1;
- rt->dst.lastuse = jiffies;
- rt->dst.path = &rt->dst;
- rt->dst.neighbour = NULL;
- rt->dst.hh = NULL;
-#ifdef CONFIG_XFRM
- rt->dst.xfrm = NULL;
-#endif
- rt->rt_genid = rt_genid(net);
- rt->rt_flags |= RTCF_REDIRECTED;
-
- /* Gateway is different ... */
- rt->rt_gateway = new_gw;
-
- /* Redirect received -> path was valid */
- dst_confirm(&rth->dst);
-
- if (rt->peer)
- atomic_inc(&rt->peer->refcnt);
-
- if (arp_bind_neighbour(&rt->dst) ||
- !(rt->dst.neighbour->nud_state &
- NUD_VALID)) {
- if (rt->dst.neighbour)
- neigh_event_send(rt->dst.neighbour, NULL);
- ip_rt_put(rth);
- rt_drop(rt);
- goto do_next;
- }
+ peer = inet_getpeer_v4(daddr, 1);
+ if (peer) {
+ peer->redirect_learned.a4 = new_gw;
- netevent.old = &rth->dst;
- netevent.new = &rt->dst;
- call_netevent_notifiers(NETEVENT_REDIRECT,
- &netevent);
+ inet_putpeer(peer);
- rt_del(hash, rth);
- if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
- ip_rt_put(rt);
- goto do_next;
- }
- do_next:
- ;
- }
+ atomic_inc(&__rt_peer_genid);
}
return;
@@ -1488,9 +1347,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
if (dst->obsolete > 0) {
ip_rt_put(rt);
ret = NULL;
- } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
- (rt->dst.expires &&
- time_after_eq(jiffies, rt->dst.expires))) {
+ } else if (rt->rt_flags & RTCF_REDIRECTED) {
unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
rt->fl.oif,
rt_genid(dev_net(dst->dev)));
@@ -1500,6 +1357,14 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
#endif
rt_del(hash, rt);
ret = NULL;
+ } else if (rt->peer &&
+ rt->peer->pmtu_expires &&
+ time_after_eq(jiffies, rt->peer->pmtu_expires)) {
+ unsigned long orig = rt->peer->pmtu_expires;
+
+ if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+ dst_metric_set(dst, RTAX_MTU,
+ rt->peer->pmtu_orig);
}
}
return ret;
@@ -1525,6 +1390,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
struct in_device *in_dev;
+ struct inet_peer *peer;
int log_martians;
rcu_read_lock();
@@ -1536,33 +1402,41 @@ void ip_rt_send_redirect(struct sk_buff *skb)
log_martians = IN_DEV_LOG_MARTIANS(in_dev);
rcu_read_unlock();
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+ peer = rt->peer;
+ if (!peer) {
+ icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
+ return;
+ }
+
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
*/
- if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
- rt->dst.rate_tokens = 0;
+ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+ peer->rate_tokens = 0;
/* Too many ignored redirects; do not send anything
* set dst.rate_last to the last seen redirected packet.
*/
- if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
- rt->dst.rate_last = jiffies;
+ if (peer->rate_tokens >= ip_rt_redirect_number) {
+ peer->rate_last = jiffies;
return;
}
/* Check for load limit; set rate_last to the latest sent
* redirect.
*/
- if (rt->dst.rate_tokens == 0 ||
+ if (peer->rate_tokens == 0 ||
time_after(jiffies,
- (rt->dst.rate_last +
- (ip_rt_redirect_load << rt->dst.rate_tokens)))) {
+ (peer->rate_last +
+ (ip_rt_redirect_load << peer->rate_tokens)))) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
- rt->dst.rate_last = jiffies;
- ++rt->dst.rate_tokens;
+ peer->rate_last = jiffies;
+ ++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
- rt->dst.rate_tokens == ip_rt_redirect_number &&
+ peer->rate_tokens == ip_rt_redirect_number &&
net_ratelimit())
printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
&rt->rt_src, rt->rt_iif,
@@ -1574,7 +1448,9 @@ void ip_rt_send_redirect(struct sk_buff *skb)
static int ip_error(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
+ struct inet_peer *peer;
unsigned long now;
+ bool send;
int code;
switch (rt->dst.error) {
@@ -1594,15 +1470,24 @@ static int ip_error(struct sk_buff *skb)
break;
}
- now = jiffies;
- rt->dst.rate_tokens += now - rt->dst.rate_last;
- if (rt->dst.rate_tokens > ip_rt_error_burst)
- rt->dst.rate_tokens = ip_rt_error_burst;
- rt->dst.rate_last = now;
- if (rt->dst.rate_tokens >= ip_rt_error_cost) {
- rt->dst.rate_tokens -= ip_rt_error_cost;
- icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+ peer = rt->peer;
+
+ send = true;
+ if (peer) {
+ now = jiffies;
+ peer->rate_tokens += now - peer->rate_last;
+ if (peer->rate_tokens > ip_rt_error_burst)
+ peer->rate_tokens = ip_rt_error_burst;
+ peer->rate_last = now;
+ if (peer->rate_tokens >= ip_rt_error_cost)
+ peer->rate_tokens -= ip_rt_error_cost;
+ else
+ send = false;
}
+ if (send)
+ icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
out: kfree_skb(skb);
return 0;
@@ -1630,88 +1515,130 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
unsigned short new_mtu,
struct net_device *dev)
{
- int i, k;
unsigned short old_mtu = ntohs(iph->tot_len);
- struct rtable *rth;
- int ikeys[2] = { dev->ifindex, 0 };
- __be32 skeys[2] = { iph->saddr, 0, };
- __be32 daddr = iph->daddr;
unsigned short est_mtu = 0;
+ struct inet_peer *peer;
- for (k = 0; k < 2; k++) {
- for (i = 0; i < 2; i++) {
- unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
- rt_genid(net));
-
- rcu_read_lock();
- for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->dst.rt_next)) {
- unsigned short mtu = new_mtu;
-
- if (rth->fl.fl4_dst != daddr ||
- rth->fl.fl4_src != skeys[i] ||
- rth->rt_dst != daddr ||
- rth->rt_src != iph->saddr ||
- rth->fl.oif != ikeys[k] ||
- rt_is_input_route(rth) ||
- dst_metric_locked(&rth->dst, RTAX_MTU) ||
- !net_eq(dev_net(rth->dst.dev), net) ||
- rt_is_expired(rth))
- continue;
-
- if (new_mtu < 68 || new_mtu >= old_mtu) {
+ peer = inet_getpeer_v4(iph->daddr, 1);
+ if (peer) {
+ unsigned short mtu = new_mtu;
- /* BSD 4.2 compatibility hack :-( */
- if (mtu == 0 &&
- old_mtu >= dst_mtu(&rth->dst) &&
- old_mtu >= 68 + (iph->ihl << 2))
- old_mtu -= iph->ihl << 2;
+ if (new_mtu < 68 || new_mtu >= old_mtu) {
+ /* BSD 4.2 derived systems incorrectly adjust
+ * tot_len by the IP header length, and report
+ * a zero MTU in the ICMP message.
+ */
+ if (mtu == 0 &&
+ old_mtu >= 68 + (iph->ihl << 2))
+ old_mtu -= iph->ihl << 2;
+ mtu = guess_mtu(old_mtu);
+ }
- mtu = guess_mtu(old_mtu);
- }
- if (mtu <= dst_mtu(&rth->dst)) {
- if (mtu < dst_mtu(&rth->dst)) {
- dst_confirm(&rth->dst);
- if (mtu < ip_rt_min_pmtu) {
- u32 lock = dst_metric(&rth->dst,
- RTAX_LOCK);
- mtu = ip_rt_min_pmtu;
- lock |= (1 << RTAX_MTU);
- dst_metric_set(&rth->dst, RTAX_LOCK,
- lock);
- }
- dst_metric_set(&rth->dst, RTAX_MTU, mtu);
- dst_set_expires(&rth->dst,
- ip_rt_mtu_expires);
- }
- est_mtu = mtu;
- }
- }
- rcu_read_unlock();
+ if (mtu < ip_rt_min_pmtu)
+ mtu = ip_rt_min_pmtu;
+ if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+ est_mtu = mtu;
+ peer->pmtu_learned = mtu;
+ peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
}
+
+ inet_putpeer(peer);
+
+ atomic_inc(&__rt_peer_genid);
}
return est_mtu ? : new_mtu;
}
+static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
+{
+ unsigned long expires = peer->pmtu_expires;
+
+ if (time_before(jiffies, expires)) {
+ u32 orig_dst_mtu = dst_mtu(dst);
+ if (peer->pmtu_learned < orig_dst_mtu) {
+ if (!peer->pmtu_orig)
+ peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
+ dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
+ }
+ } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
+ dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
+}
+
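check_peer_pmtu() and ipv4_negative_advice() both clear pmtu_expires with cmpxchg so that, when several CPUs notice the expiry at once, exactly one of them restores pmtu_orig. The claim-once idiom in isolation, with C11 atomics standing in for the kernel's cmpxchg (a sketch, not the kernel helper):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true only for the single caller that claims the pending
 * expiry; every racer sees either 0 or a changed value and loses. */
static bool claim_expiry(_Atomic unsigned long *expires)
{
	unsigned long orig = atomic_load(expires);

	return orig != 0 &&
	       atomic_compare_exchange_strong(expires, &orig, 0);
}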
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
- if (dst_mtu(dst) > mtu && mtu >= 68 &&
- !(dst_metric_locked(dst, RTAX_MTU))) {
- if (mtu < ip_rt_min_pmtu) {
- u32 lock = dst_metric(dst, RTAX_LOCK);
+ struct rtable *rt = (struct rtable *) dst;
+ struct inet_peer *peer;
+
+ dst_confirm(dst);
+
+ if (!rt->peer)
+ rt_bind_peer(rt, 1);
+ peer = rt->peer;
+ if (peer) {
+ if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
- dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
+ if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+ peer->pmtu_learned = mtu;
+ peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
+
+ atomic_inc(&__rt_peer_genid);
+ rt->rt_peer_genid = rt_peer_genid();
+
+ check_peer_pmtu(dst, peer);
}
- dst_metric_set(dst, RTAX_MTU, mtu);
- dst_set_expires(dst, ip_rt_mtu_expires);
- call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
+ inet_putpeer(peer);
+ }
+}
+
+static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+{
+ struct rtable *rt = (struct rtable *) dst;
+ __be32 orig_gw = rt->rt_gateway;
+
+ dst_confirm(&rt->dst);
+
+ neigh_release(rt->dst.neighbour);
+ rt->dst.neighbour = NULL;
+
+ rt->rt_gateway = peer->redirect_learned.a4;
+ if (arp_bind_neighbour(&rt->dst) ||
+ !(rt->dst.neighbour->nud_state & NUD_VALID)) {
+ if (rt->dst.neighbour)
+ neigh_event_send(rt->dst.neighbour, NULL);
+ rt->rt_gateway = orig_gw;
+ return -EAGAIN;
+ } else {
+ rt->rt_flags |= RTCF_REDIRECTED;
+ call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
+ rt->dst.neighbour);
}
+ return 0;
}
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
- if (rt_is_expired((struct rtable *)dst))
+ struct rtable *rt = (struct rtable *) dst;
+
+ if (rt_is_expired(rt))
return NULL;
+ if (rt->rt_peer_genid != rt_peer_genid()) {
+ struct inet_peer *peer;
+
+ if (!rt->peer)
+ rt_bind_peer(rt, 0);
+
+ peer = rt->peer;
+ if (peer && peer->pmtu_expires)
+ check_peer_pmtu(dst, peer);
+
+ if (peer && peer->redirect_learned.a4 &&
+ peer->redirect_learned.a4 != rt->rt_gateway) {
+ if (check_peer_redir(dst, peer))
+ return NULL;
+ }
+
+ rt->rt_peer_genid = rt_peer_genid();
+ }
return dst;
}
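rt_peer_genid() gives the route cache O(1) invalidation: __rt_peer_genid is bumped whenever any peer learns a PMTU or redirect, and each cached route revalidates lazily in ipv4_dst_check() when its stored generation falls behind. The pattern in miniature (hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned int peer_genid;	/* bumped on any peer update */

struct cached_route {
	unsigned int genid;		/* generation last validated at */
	/* ... cached gateway, MTU, ... */
};

/* Invalidate every entry at once without walking the table. */
static void invalidate_all(void)
{
	atomic_fetch_add(&peer_genid, 1);
}

/* Fast path on lookup: only stale entries pay for a re-check. */
static bool needs_recheck(struct cached_route *r)
{
	unsigned int now = atomic_load(&peer_genid);

	if (r->genid == now)
		return false;
	r->genid = now;		/* caller re-applies peer state first */
	return true;
}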
@@ -1720,6 +1647,10 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
struct rtable *rt = (struct rtable *) dst;
struct inet_peer *peer = rt->peer;
+ if (rt->fi) {
+ fib_info_put(rt->fi);
+ rt->fi = NULL;
+ }
if (peer) {
rt->peer = NULL;
inet_putpeer(peer);
@@ -1734,8 +1665,14 @@ static void ipv4_link_failure(struct sk_buff *skb)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
rt = skb_rtable(skb);
- if (rt)
- dst_set_expires(&rt->dst, 0);
+ if (rt &&
+ rt->peer &&
+ rt->peer->pmtu_expires) {
+ unsigned long orig = rt->peer->pmtu_expires;
+
+ if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+ dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
+ }
}
static int ip_rt_bug(struct sk_buff *skb)
@@ -1775,7 +1712,7 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
memcpy(addr, &src, 4);
}
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
if (!(rt->dst.tclassid & 0xFFFF))
@@ -1815,17 +1752,52 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
return mtu;
}
-static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
+static void rt_init_metrics(struct rtable *rt, struct fib_info *fi)
+{
+ struct inet_peer *peer;
+ int create = 0;
+
+ /* If a peer entry exists for this destination, we must hook
+ * it up in order to get at cached metrics.
+ */
+ if (rt->fl.flags & FLOWI_FLAG_PRECOW_METRICS)
+ create = 1;
+
+ rt_bind_peer(rt, create);
+ peer = rt->peer;
+ if (peer) {
+ if (inet_metrics_new(peer))
+ memcpy(peer->metrics, fi->fib_metrics,
+ sizeof(u32) * RTAX_MAX);
+ dst_init_metrics(&rt->dst, peer->metrics, false);
+
+ if (peer->pmtu_expires)
+ check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_learned.a4 &&
+ peer->redirect_learned.a4 != rt->rt_gateway) {
+ rt->rt_gateway = peer->redirect_learned.a4;
+ rt->rt_flags |= RTCF_REDIRECTED;
+ }
+ } else {
+ if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+ rt->fi = fi;
+ atomic_inc(&fi->fib_clntref);
+ }
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ }
+}
+
+static void rt_set_nexthop(struct rtable *rt, const struct fib_result *res,
+ struct fib_info *fi, u16 type, u32 itag)
{
struct dst_entry *dst = &rt->dst;
- struct fib_info *fi = res->fi;
if (fi) {
if (FIB_RES_GW(*res) &&
FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
rt->rt_gateway = FIB_RES_GW(*res);
- dst_import_metrics(dst, fi->fib_metrics);
-#ifdef CONFIG_NET_CLS_ROUTE
+ rt_init_metrics(rt, fi);
+#ifdef CONFIG_IP_ROUTE_CLASSID
dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
}
@@ -1835,13 +1807,26 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
set_class_tag(rt, fib_rules_tclass(res));
#endif
set_class_tag(rt, itag);
#endif
- rt->rt_type = res->type;
+ rt->rt_type = type;
+}
+
+static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm)
+{
+ struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1);
+ if (rt) {
+ rt->dst.obsolete = -1;
+
+ rt->dst.flags = DST_HOST |
+ (nopolicy ? DST_NOPOLICY : 0) |
+ (noxfrm ? DST_NOXFRM : 0);
+ }
+ return rt;
}
/* called in rcu_read_lock() section */
@@ -1874,24 +1859,19 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (err < 0)
goto e_err;
}
- rth = dst_alloc(&ipv4_dst_ops);
+ rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
if (!rth)
goto e_nobufs;
rth->dst.output = ip_rt_bug;
- rth->dst.obsolete = -1;
- atomic_set(&rth->dst.__refcnt, 1);
- rth->dst.flags= DST_HOST;
- if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->rt_iif =
@@ -1959,7 +1939,7 @@ static void ip_handle_martian_source(struct net_device *dev,
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
- struct fib_result *res,
+ const struct fib_result *res,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos,
struct rtable **result)
@@ -2013,19 +1993,13 @@ static int __mkroute_input(struct sk_buff *skb,
}
}
-
- rth = dst_alloc(&ipv4_dst_ops);
+ rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+ IN_DEV_CONF_GET(out_dev, NOXFRM));
if (!rth) {
err = -ENOBUFS;
goto cleanup;
}
- atomic_set(&rth->dst.__refcnt, 1);
- rth->dst.flags= DST_HOST;
- if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->dst.flags |= DST_NOPOLICY;
- if (IN_DEV_CONF_GET(out_dev, NOXFRM))
- rth->dst.flags |= DST_NOXFRM;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
@@ -2040,12 +2014,11 @@ static int __mkroute_input(struct sk_buff *skb,
rth->fl.oif = 0;
rth->rt_spec_dst= spec_dst;
- rth->dst.obsolete = -1;
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
- rt_set_nexthop(rth, res, itag);
+ rt_set_nexthop(rth, res, res->fi, res->type, itag);
rth->rt_flags = flags;
@@ -2190,25 +2163,20 @@ brd_input:
RT_CACHE_STAT_INC(in_brd);
local_input:
- rth = dst_alloc(&ipv4_dst_ops);
+ rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
if (!rth)
goto e_nobufs;
rth->dst.output= ip_rt_bug;
- rth->dst.obsolete = -1;
rth->rt_genid = rt_genid(net);
- atomic_set(&rth->dst.__refcnt, 1);
- rth->dst.flags= DST_HOST;
- if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->dst.flags |= DST_NOPOLICY;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
rth->dst.tclassid = itag;
#endif
rth->rt_iif =
@@ -2351,38 +2319,39 @@ skip_cache:
EXPORT_SYMBOL(ip_route_input_common);
/* called with rcu_read_lock() */
-static int __mkroute_output(struct rtable **result,
- struct fib_result *res,
- const struct flowi *fl,
- const struct flowi *oldflp,
- struct net_device *dev_out,
- unsigned flags)
+static struct rtable *__mkroute_output(const struct fib_result *res,
+ const struct flowi *fl,
+ const struct flowi *oldflp,
+ struct net_device *dev_out,
+ unsigned int flags)
{
- struct rtable *rth;
- struct in_device *in_dev;
+ struct fib_info *fi = res->fi;
u32 tos = RT_FL_TOS(oldflp);
+ struct in_device *in_dev;
+ u16 type = res->type;
+ struct rtable *rth;
if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (ipv4_is_lbcast(fl->fl4_dst))
- res->type = RTN_BROADCAST;
+ type = RTN_BROADCAST;
else if (ipv4_is_multicast(fl->fl4_dst))
- res->type = RTN_MULTICAST;
+ type = RTN_MULTICAST;
else if (ipv4_is_zeronet(fl->fl4_dst))
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
in_dev = __in_dev_get_rcu(dev_out);
if (!in_dev)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- if (res->type == RTN_BROADCAST) {
+ if (type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
- res->fi = NULL;
- } else if (res->type == RTN_MULTICAST) {
+ fi = NULL;
+ } else if (type == RTN_MULTICAST) {
flags |= RTCF_MULTICAST | RTCF_LOCAL;
if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
oldflp->proto))
@@ -2391,21 +2360,14 @@ static int __mkroute_output(struct rtable **result,
* default one, but do not gateway in this case.
* Yes, it is a hack.
*/
- if (res->fi && res->prefixlen < 4)
- res->fi = NULL;
+ if (fi && res->prefixlen < 4)
+ fi = NULL;
}
-
- rth = dst_alloc(&ipv4_dst_ops);
+ rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+ IN_DEV_CONF_GET(in_dev, NOXFRM));
if (!rth)
- return -ENOBUFS;
-
- atomic_set(&rth->dst.__refcnt, 1);
- rth->dst.flags= DST_HOST;
- if (IN_DEV_CONF_GET(in_dev, NOXFRM))
- rth->dst.flags |= DST_NOXFRM;
- if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
- rth->dst.flags |= DST_NOPOLICY;
+ return ERR_PTR(-ENOBUFS);
rth->fl.fl4_dst = oldflp->fl4_dst;
rth->fl.fl4_tos = tos;
@@ -2423,7 +2385,6 @@ static int __mkroute_output(struct rtable **result,
rth->rt_spec_dst= fl->fl4_src;
rth->dst.output=ip_output;
- rth->dst.obsolete = -1;
rth->rt_genid = rt_genid(dev_net(dev_out));
RT_CACHE_STAT_INC(out_slow_tot);
@@ -2440,7 +2401,7 @@ static int __mkroute_output(struct rtable **result,
RT_CACHE_STAT_INC(out_slow_mc);
}
#ifdef CONFIG_IP_MROUTE
- if (res->type == RTN_MULTICAST) {
+ if (type == RTN_MULTICAST) {
if (IN_DEV_MFORWARD(in_dev) &&
!ipv4_is_local_multicast(oldflp->fl4_dst)) {
rth->dst.input = ip_mr_input;
@@ -2450,31 +2411,10 @@ static int __mkroute_output(struct rtable **result,
#endif
}
- rt_set_nexthop(rth, res, 0);
+ rt_set_nexthop(rth, res, fi, type, 0);
rth->rt_flags = flags;
- *result = rth;
- return 0;
-}
-
-/* called with rcu_read_lock() */
-static int ip_mkroute_output(struct rtable **rp,
- struct fib_result *res,
- const struct flowi *fl,
- const struct flowi *oldflp,
- struct net_device *dev_out,
- unsigned flags)
-{
- struct rtable *rth = NULL;
- int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
- unsigned hash;
- if (err == 0) {
- hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
- rt_genid(dev_net(dev_out)));
- err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
- }
-
- return err;
+ return rth;
}
/*
@@ -2497,6 +2437,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
struct fib_result res;
unsigned int flags = 0;
struct net_device *dev_out = NULL;
+ struct rtable *rth;
int err;
@@ -2505,6 +2446,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
res.r = NULL;
#endif
+ rcu_read_lock();
if (oldflp->fl4_src) {
err = -EINVAL;
if (ipv4_is_multicast(oldflp->fl4_src) ||
@@ -2645,7 +2587,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
else
#endif
if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
- fib_select_default(net, &fl, &res);
+ fib_select_default(&res);
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
@@ -2655,17 +2597,27 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
make_route:
- err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
+ rth = __mkroute_output(&res, &fl, oldflp, dev_out, flags);
+ if (IS_ERR(rth))
+ err = PTR_ERR(rth);
+ else {
+ unsigned int hash;
-out: return err;
+ hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
+ rt_genid(dev_net(dev_out)));
+ err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
+ }
+
+out:
+ rcu_read_unlock();
+ return err;
}
int __ip_route_output_key(struct net *net, struct rtable **rp,
const struct flowi *flp)
{
- unsigned int hash;
- int res;
struct rtable *rth;
+ unsigned int hash;
if (!rt_caching(net))
goto slow_output;
@@ -2695,10 +2647,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
rcu_read_unlock_bh();
slow_output:
- rcu_read_lock();
- res = ip_route_output_slow(net, rp, flp);
- rcu_read_unlock();
- return res;
+ return ip_route_output_slow(net, rp, flp);
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
@@ -2731,12 +2680,11 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
{
struct rtable *ort = *rp;
struct rtable *rt = (struct rtable *)
- dst_alloc(&ipv4_dst_blackhole_ops);
+ dst_alloc(&ipv4_dst_blackhole_ops, 1);
if (rt) {
struct dst_entry *new = &rt->dst;
- atomic_set(&new->__refcnt, 1);
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
@@ -2759,6 +2707,9 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
rt->peer = ort->peer;
if (rt->peer)
atomic_inc(&rt->peer->refcnt);
+ rt->fi = ort->fi;
+ if (rt->fi)
+ atomic_inc(&rt->fi->fib_clntref);
dst_free(new);
}
@@ -2835,7 +2786,7 @@ static int rt_fill_info(struct net *net,
}
if (rt->dst.dev)
NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (rt->dst.tclassid)
NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
#endif
@@ -2854,7 +2805,8 @@ static int rt_fill_info(struct net *net,
NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
error = rt->dst.error;
- expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
+ expires = (rt->peer && rt->peer->pmtu_expires) ?
+ rt->peer->pmtu_expires - jiffies : 0;
if (rt->peer) {
inet_peer_refcheck(rt->peer);
id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
@@ -3256,9 +3208,9 @@ static __net_initdata struct pernet_operations rt_genid_ops = {
};
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
-#endif /* CONFIG_NET_CLS_ROUTE */
+#endif /* CONFIG_IP_ROUTE_CLASSID */
static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
@@ -3274,7 +3226,7 @@ int __init ip_rt_init(void)
{
int rc = 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
if (!ip_rt_acct)
panic("IP: failed to allocate ip_rt_acct\n");
@@ -3311,14 +3263,6 @@ int __init ip_rt_init(void)
devinet_init();
ip_fib_init();
- /* All the timers, started at system startup tend
- to synchronize. Perturb it a bit.
- */
- INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
- expires_ljiffies = jiffies;
- schedule_delayed_work(&expires_work,
- net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
-
if (ip_rt_proc_init())
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6c11eec..f9867d2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2653,7 +2653,7 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct tcphdr *th;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb7f82e..2f692ce 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -817,7 +817,7 @@ __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
if (!cwnd)
- cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
+ cwnd = TCP_INIT_CWND;
return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
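For contrast with the call being retired: RFC 3390 sizes the initial window as min(4*MSS, max(2*MSS, 4380 bytes)) rounded up to whole segments, so it yields 2-4 segments depending on MSS, while TCP_INIT_CWND pins it at a fixed segment count (10 at the time of this change). A sketch of the retired formula (hypothetical helper):

/* RFC 3390 initial congestion window, in full-sized segments. */
static unsigned int rfc3390_segments(unsigned int mss)
{
	unsigned int cap = 2 * mss > 4380 ? 2 * mss : 4380;
	unsigned int bytes = 4 * mss < cap ? 4 * mss : cap;

	return (bytes + mss - 1) / mss;	/* e.g. mss 1460 -> 3 */
}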
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 02f583b..e2b9be2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1341,7 +1341,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_death_row.sysctl_tw_recycle &&
(dst = inet_csk_route_req(sk, req)) != NULL &&
(peer = rt_get_peer((struct rtable *)dst)) != NULL &&
- peer->daddr.a4 == saddr) {
+ peer->daddr.addr.a4 == saddr) {
inet_peer_refcheck(peer);
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
(s32)(peer->tcp_ts - req->ts_recent) >
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8157b17..d37baaa 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2199,7 +2199,7 @@ int udp4_ufo_send_check(struct sk_buff *skb)
return 0;
}
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index b057d40..19fbdec 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -196,8 +196,11 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ dst_destroy_metrics_generic(dst);
+
if (likely(xdst->u.rt.peer))
inet_putpeer(xdst->u.rt.peer);
+
xfrm_dst_destroy(xdst);
}
@@ -215,6 +218,7 @@ static struct dst_ops xfrm4_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
+ .cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm4_dst_destroy,
.ifdown = xfrm4_dst_ifdown,
.local_out = __ip_local_out,
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 978e80e..3194aa9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -772,7 +772,7 @@ out:
return err;
}
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 03e62f9..a31d91b 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -157,20 +157,20 @@ static int is_ineligible(struct sk_buff *skb)
/*
* Check the ICMP output rate limit
*/
-static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
- struct flowi *fl)
+static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+ struct flowi *fl)
{
struct dst_entry *dst;
struct net *net = sock_net(sk);
- int res = 0;
+ bool res = false;
/* Informational messages are not limited. */
if (type & ICMPV6_INFOMSG_MASK)
- return 1;
+ return true;
/* Do not limit pmtu discovery, it would break it. */
if (type == ICMPV6_PKT_TOOBIG)
- return 1;
+ return true;
/*
* Look up the output route.
@@ -182,7 +182,7 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_OUTNOROUTES);
} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
- res = 1;
+ res = true;
} else {
struct rt6_info *rt = (struct rt6_info *)dst;
int tmo = net->ipv6.sysctl.icmpv6_time;
@@ -191,7 +191,9 @@ static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
if (rt->rt6i_dst.plen < 128)
tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
- res = xrlim_allow(dst, tmo);
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 1);
+ res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);
}
dst_release(dst);
return res;
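The plen shift above scales the rate-limit interval by route specificity: the base timeout is halved once for every full 32 bits the prefix falls short of /128, so a /64 route waits only tmo/4 between messages while a host route waits the full interval. As a stand-alone helper (a sketch of the same arithmetic):

/* /128 -> tmo, /96 -> tmo/2, /64 -> tmo/4, /32 -> tmo/8, /0 -> tmo/16 */
static int scaled_tmo(int tmo, int plen)
{
	if (plen < 128)
		tmo >>= (128 - plen) >> 5;
	return tmo;
}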
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5f8d242..2600e22 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -479,10 +479,13 @@ int ip6_forward(struct sk_buff *skb)
else
target = &hdr->daddr;
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 1);
+
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
*/
- if (xrlim_allow(dst, 1*HZ))
+ if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
ndisc_send_redirect(skb, n, target);
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2342545..7254ce3 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1553,7 +1553,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
"ICMPv6 Redirect: destination is not a neighbour.\n");
goto release;
}
- if (!xrlim_allow(dst, 1*HZ))
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 1);
+ if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
goto release;
if (dev->addr_len) {
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 7d227c6..47b7b8d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1076,6 +1076,7 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
+ xt_compat_init_offsets(AF_INET6, info->number);
xt_entry_foreach(iter, loc_cpu_entry, info->size) {
ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
if (ret != 0)
@@ -1679,6 +1680,7 @@ translate_compat_table(struct net *net,
duprintf("translate_compat_table: size %u\n", info->size);
j = 0;
xt_compat_lock(AF_INET6);
+ xt_compat_init_offsets(AF_INET6, number);
/* Walk through entries, checking offsets. */
xt_entry_foreach(iter0, entry0, total_size) {
ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index de33803..e6af8d7 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -452,8 +452,7 @@ ip6t_log_packet(u_int8_t pf,
in ? in->name : "",
out ? out->name : "");
- /* MAC logging for input path only. */
- if (in && !out)
+ if (in != NULL)
dump_mac_header(m, loginfo, skb);
dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 79d43aa..0857272 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -45,6 +45,7 @@
#include <linux/netfilter_ipv6.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
struct nf_ct_frag6_skb_cb
@@ -73,7 +74,7 @@ static struct inet_frags nf_frags;
static struct netns_frags nf_init_frags;
#ifdef CONFIG_SYSCTL
-struct ctl_table nf_ct_frag6_sysctl_table[] = {
+static struct ctl_table nf_ct_frag6_sysctl_table[] = {
{
.procname = "nf_conntrack_frag6_timeout",
.data = &nf_init_frags.timeout,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c5b0915..364e866 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -124,18 +124,18 @@ static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
}
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);
+typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
-int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
- struct sk_buff *skb))
+static mh_filter_t __rcu *mh_filter __read_mostly;
+
+int rawv6_mh_filter_register(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
-int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
- struct sk_buff *skb))
+int rawv6_mh_filter_unregister(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, NULL);
synchronize_rcu();
@@ -193,10 +193,10 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
* policy is placed in rawv6_rcv() because it is
* required for each socket.
*/
- int (*filter)(struct sock *sock, struct sk_buff *skb);
+ mh_filter_t *filter;
filter = rcu_dereference(mh_filter);
- filtered = filter ? filter(sk, skb) : 0;
+ filtered = filter ? (*filter)(sk, skb) : 0;
break;
}
#endif
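Stripped of the sparse annotations, the mh_filter handoff above is an atomically published function pointer; RCU additionally guarantees readers a grace period before the old handler may be torn down. A portable reduction using C11 atomics (hypothetical names; the grace-period half is not shown):

#include <stdatomic.h>
#include <stddef.h>

typedef int filter_fn(void *sk, void *skb);

static _Atomic(filter_fn *) active_filter;

static void filter_register(filter_fn *f)
{
	/* Release pairs with the acquire load in filter_run(). */
	atomic_store_explicit(&active_filter, f, memory_order_release);
}

static int filter_run(void *sk, void *skb)
{
	filter_fn *f = atomic_load_explicit(&active_filter,
					    memory_order_acquire);

	return f ? (*f)(sk, skb) : 0;
}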
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a998db6..f786aed 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -97,6 +97,36 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
struct in6_addr *gwaddr, int ifindex);
#endif
+static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+ struct rt6_info *rt = (struct rt6_info *) dst;
+ struct inet_peer *peer;
+ u32 *p = NULL;
+
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 1);
+
+ peer = rt->rt6i_peer;
+ if (peer) {
+ u32 *old_p = __DST_METRICS_PTR(old);
+ unsigned long prev, new;
+
+ p = peer->metrics;
+ if (inet_metrics_new(peer))
+ memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+ new = (unsigned long) p;
+ prev = cmpxchg(&dst->_metrics, old, new);
+
+ if (prev != old) {
+ p = __DST_METRICS_PTR(prev);
+ if (prev & DST_METRICS_READ_ONLY)
+ p = NULL;
+ }
+ }
+ return p;
+}
+
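ipv6_cow_metrics() above is copy-on-write over a tagged pointer: routes share a read-only template array until the first writer copies it into peer-owned storage and publishes the new pointer with cmpxchg, and a racing loser adopts whatever the winner installed. The shape of the idiom reduced to C11 atomics (hypothetical names and sizes; the low pointer bit plays the role of DST_METRICS_READ_ONLY):

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define METRICS_MAX	16
#define METRICS_RDONLY	((uintptr_t)1)	/* tag bit: shared, read-only */

static uint32_t *metrics_ptr(uintptr_t v)
{
	return (uint32_t *)(v & ~METRICS_RDONLY);
}

/* Copy the shared array into `mine`, then publish it with a CAS.
 * On a lost race, adopt the winner's array unless it too is
 * read-only, in which case report failure with NULL. */
static uint32_t *cow_metrics(_Atomic uintptr_t *slot, uintptr_t old,
			     uint32_t *mine)
{
	uintptr_t prev = old;

	memcpy(mine, metrics_ptr(old), sizeof(uint32_t) * METRICS_MAX);
	if (!atomic_compare_exchange_strong(slot, &prev, (uintptr_t)mine))
		return (prev & METRICS_RDONLY) ? NULL : metrics_ptr(prev);
	return mine;
}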
static struct dst_ops ip6_dst_ops_template = {
.family = AF_INET6,
.protocol = cpu_to_be16(ETH_P_IPV6),
@@ -105,6 +135,7 @@ static struct dst_ops ip6_dst_ops_template = {
.check = ip6_dst_check,
.default_advmss = ip6_default_advmss,
.default_mtu = ip6_default_mtu,
+ .cow_metrics = ipv6_cow_metrics,
.destroy = ip6_dst_destroy,
.ifdown = ip6_dst_ifdown,
.negative_advice = ip6_negative_advice,
@@ -132,6 +163,10 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.update_pmtu = ip6_rt_blackhole_update_pmtu,
};
+static const u32 ip6_template_metrics[RTAX_MAX] = {
+ [RTAX_HOPLIMIT - 1] = 255,
+};
+
static struct rt6_info ip6_null_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
@@ -187,7 +222,7 @@ static struct rt6_info ip6_blk_hole_entry_template = {
/* allocate dst with ip6_dst_ops */
static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
{
- return (struct rt6_info *)dst_alloc(ops);
+ return (struct rt6_info *)dst_alloc(ops, 0);
}
static void ip6_dst_destroy(struct dst_entry *dst)
@@ -206,6 +241,13 @@ static void ip6_dst_destroy(struct dst_entry *dst)
}
}
+static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt6_peer_genid(void)
+{
+ return atomic_read(&__rt6_peer_genid);
+}
+
void rt6_bind_peer(struct rt6_info *rt, int create)
{
struct inet_peer *peer;
@@ -213,6 +255,8 @@ void rt6_bind_peer(struct rt6_info *rt, int create)
peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
inet_putpeer(peer);
+ else
+ rt->rt6i_peer_genid = rt6_peer_genid();
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -830,13 +874,12 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
{
struct rt6_info *ort = (struct rt6_info *) *dstp;
struct rt6_info *rt = (struct rt6_info *)
- dst_alloc(&ip6_dst_blackhole_ops);
+ dst_alloc(&ip6_dst_blackhole_ops, 1);
struct dst_entry *new = NULL;
if (rt) {
new = &rt->dst;
- atomic_set(&new->__refcnt, 1);
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
@@ -878,9 +921,14 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
rt = (struct rt6_info *) dst;
- if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
+ if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
+ if (rt->rt6i_peer_genid != rt6_peer_genid()) {
+ if (!rt->rt6i_peer)
+ rt6_bind_peer(rt, 0);
+ rt->rt6i_peer_genid = rt6_peer_genid();
+ }
return dst;
-
+ }
return NULL;
}
@@ -931,7 +979,6 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
dst_metric_set(dst, RTAX_FEATURES, features);
}
dst_metric_set(dst, RTAX_MTU, mtu);
- call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
}
}
@@ -2684,7 +2731,8 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.ip6_null_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_null_entry;
net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
- dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255);
+ dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
+ ip6_template_metrics, true);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2695,7 +2743,8 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.ip6_prohibit_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
- dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255);
+ dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
+ ip6_template_metrics, true);
net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2705,7 +2754,8 @@ static int __net_init ip6_route_net_init(struct net *net)
net->ipv6.ip6_blk_hole_entry->dst.path =
(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
- dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255);
+ dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
+ ip6_template_metrics, true);
#endif
net->ipv6.sysctl.flush_delay = 0;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f1..b1599a3 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -412,7 +412,7 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
do {
- n = p->next;
+ n = rcu_dereference_protected(p->next, 1);
kfree(p);
p = n;
} while (p);
@@ -421,15 +421,17 @@ static void prl_list_destroy_rcu(struct rcu_head *head)
static int
ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
{
- struct ip_tunnel_prl_entry *x, **p;
+ struct ip_tunnel_prl_entry *x;
+ struct ip_tunnel_prl_entry __rcu **p;
int err = 0;
ASSERT_RTNL();
if (a && a->addr != htonl(INADDR_ANY)) {
- for (p = &t->prl; *p; p = &(*p)->next) {
- if ((*p)->addr == a->addr) {
- x = *p;
+ for (p = &t->prl;
+ (x = rtnl_dereference(*p)) != NULL;
+ p = &x->next) {
+ if (x->addr == a->addr) {
*p = x->next;
call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
t->prl_count--;
@@ -438,9 +440,9 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
}
err = -ENXIO;
} else {
- if (t->prl) {
+ x = rtnl_dereference(t->prl);
+ if (x) {
t->prl_count = 0;
- x = t->prl;
call_rcu(&x->rcu_head, prl_list_destroy_rcu);
t->prl = NULL;
}
@@ -1179,7 +1181,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
dev_hold(dev);
- sitn->tunnels_wc[0] = tunnel;
+ rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
return 0;
}
@@ -1196,11 +1198,12 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
for (prio = 1; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t = sitn->tunnels[prio][h];
+ struct ip_tunnel *t;
+ t = rtnl_dereference(sitn->tunnels[prio][h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, head);
- t = t->next;
+ t = rtnl_dereference(t->next);
}
}
}
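The prl deletion above keeps the classic pointer-to-pointer walk, now merely spelled so sparse can check every __rcu dereference; no prev pointer is needed because p always aims at the link about to be rewritten. The bare idiom in plain C (hypothetical list type; the RCU deferral of the free is not shown):

#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

/* Unlink the first node carrying `key`; head and interior links
 * go through the same code path via the indirect pointer. */
static int list_del_key(struct node **head, int key)
{
	struct node **p, *x;

	for (p = head; (x = *p) != NULL; p = &x->next) {
		if (x->key == key) {
			*p = x->next;
			free(x);
			return 0;
		}
	}
	return -1;
}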
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 20aa95e..d6954e3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1323,7 +1323,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_death_row.sysctl_tw_recycle &&
(dst = inet6_csk_route_req(sk, req)) != NULL &&
(peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
- ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
+ ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
&treq->rmt_addr)) {
inet_peer_refcheck(peer);
if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9a009c6..a419a78 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1299,7 +1299,7 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
return 0;
}
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index da87428..834dc02 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -220,6 +220,7 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev);
+ dst_destroy_metrics_generic(dst);
if (likely(xdst->u.rt6.rt6i_peer))
inet_putpeer(xdst->u.rt6.rt6i_peer);
xfrm_dst_destroy(xdst);
@@ -257,6 +258,7 @@ static struct dst_ops xfrm6_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.gc = xfrm6_garbage_collect,
.update_pmtu = xfrm6_update_pmtu,
+ .cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm6_dst_destroy,
.ifdown = xfrm6_dst_ifdown,
.local_out = __ip6_local_out,
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 227ca82..0c9d0c0 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -76,7 +76,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
#endif /* CONFIG_MAC80211_HT_DEBUG */
if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
- &sta->sta, tid, NULL))
+ &sta->sta, tid, NULL, 0))
printk(KERN_DEBUG "HW problem - can not stop rx "
"aggregation for tid %d\n", tid);
@@ -232,6 +232,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
if (buf_size == 0)
buf_size = IEEE80211_MAX_AMPDU_BUF;
+ /* make sure the size doesn't exceed the maximum supported by the hw */
+ if (buf_size > local->hw.max_rx_aggregation_subframes)
+ buf_size = local->hw.max_rx_aggregation_subframes;
/* examine state machine */
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -287,7 +290,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
}
ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
- &sta->sta, tid, &start_seq_num);
+ &sta->sta, tid, &start_seq_num, 0);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
#endif /* CONFIG_MAC80211_HT_DEBUG */
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 9cc472c..63d852c 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -190,7 +190,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
ret = drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_STOP,
- &sta->sta, tid, NULL);
+ &sta->sta, tid, NULL, 0);
/* HW shall not deny going back to legacy */
if (WARN_ON(ret)) {
@@ -311,7 +311,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
start_seq_num = sta->tid_seq[tid] >> 4;
ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
- &sta->sta, tid, &start_seq_num);
+ &sta->sta, tid, &start_seq_num, 0);
if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "BA request denied - HW unavailable for"
@@ -342,7 +342,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
tid_tx->dialog_token, start_seq_num,
- 0x40, tid_tx->timeout);
+ local->hw.max_tx_aggregation_subframes,
+ tid_tx->timeout);
}
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
@@ -487,7 +488,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_OPERATIONAL,
- &sta->sta, tid, NULL);
+ &sta->sta, tid, NULL,
+ sta->ampdu_mlme.tid_tx[tid]->buf_size);
/*
* synchronize with TX path, while splicing the TX path
@@ -742,9 +744,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
{
struct tid_ampdu_tx *tid_tx;
u16 capab, tid;
+ u8 buf_size;
capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
+ buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -767,12 +771,23 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
== WLAN_STATUS_SUCCESS) {
+ /*
+ * IEEE 802.11-2007 7.3.1.14:
+ * In an ADDBA Response frame, when the Status Code field
+ * is set to 0, the Buffer Size subfield is set to a value
+ * of at least 1.
+ */
+ if (!buf_size)
+ goto out;
+
if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
&tid_tx->state)) {
/* ignore duplicate response */
goto out;
}
+ tid_tx->buf_size = buf_size;
+
if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
ieee80211_agg_tx_operational(local, sta, tid);
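The capab word parsed in ieee80211_process_addba_resp() packs the ADDBA parameter set into 16 bits: block-ack policy in bit 1, TID in bits 2-5, and buffer size in bits 6-15, which is why buf_size is extracted with a shift of 6. A stand-alone parse (mask values copied from include/linux/ieee80211.h on the assumption they match this tree):

#include <stdint.h>

#define ADDBA_PARAM_POLICY_MASK		0x0002
#define ADDBA_PARAM_TID_MASK		0x003C
#define ADDBA_PARAM_BUF_SIZE_MASK	0xFFC0

struct addba_params {
	uint8_t  policy;	/* 0 = delayed, 1 = immediate block ack */
	uint8_t  tid;
	uint16_t buf_size;	/* 0 is invalid in a success response */
};

static struct addba_params addba_parse(uint16_t capab)
{
	struct addba_params p = {
		.policy   = (capab & ADDBA_PARAM_POLICY_MASK) >> 1,
		.tid      = (capab & ADDBA_PARAM_TID_MASK) >> 2,
		.buf_size = (capab & ADDBA_PARAM_BUF_SIZE_MASK) >> 6,
	};

	return p;
}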
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 9cd73b1..2ba3af8 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1215,6 +1215,9 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata = NULL;
+ struct ieee80211_channel *old_oper;
+ enum nl80211_channel_type old_oper_type;
+ enum nl80211_channel_type old_vif_oper_type = NL80211_CHAN_NO_HT;
if (netdev)
sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
@@ -1232,13 +1235,23 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
break;
}
- local->oper_channel = chan;
+ if (sdata)
+ old_vif_oper_type = sdata->vif.bss_conf.channel_type;
+ old_oper_type = local->_oper_channel_type;
if (!ieee80211_set_channel_type(local, sdata, channel_type))
return -EBUSY;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ old_oper = local->oper_channel;
+ local->oper_channel = chan;
+
+ /* Update driver if changes were actually made. */
+ if ((old_oper != local->oper_channel) ||
+ (old_oper_type != local->_oper_channel_type))
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+ if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) &&
+ old_vif_oper_type != sdata->vif.bss_conf.channel_type)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
return 0;
@@ -1274,8 +1287,11 @@ static int ieee80211_scan(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_GO:
if (sdata->local->ops->hw_scan)
break;
- /* FIXME: implement NoA while scanning in software */
- return -EOPNOTSUPP;
+ /*
+ * FIXME: implement NoA while scanning in software;
+ * for now, fall through to allow scanning only when
+ * beaconing hasn't been configured yet.
+ */
case NL80211_IFTYPE_AP:
if (sdata->u.ap.beacon)
return -EOPNOTSUPP;
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 2dabdf7..dacace6 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -36,7 +36,7 @@ static ssize_t ieee80211_if_read(
ret = (*format)(sdata, buf, sizeof(buf));
read_unlock(&dev_base_lock);
- if (ret != -EINVAL)
+ if (ret >= 0)
ret = simple_read_from_buffer(userbuf, count, ppos, buf, ret);
return ret;
@@ -81,6 +81,8 @@ static ssize_t ieee80211_if_fmt_##name( \
IEEE80211_IF_FMT(name, field, "%d\n")
#define IEEE80211_IF_FMT_HEX(name, field) \
IEEE80211_IF_FMT(name, field, "%#x\n")
+#define IEEE80211_IF_FMT_LHEX(name, field) \
+ IEEE80211_IF_FMT(name, field, "%#lx\n")
#define IEEE80211_IF_FMT_SIZE(name, field) \
IEEE80211_IF_FMT(name, field, "%zd\n")
@@ -145,6 +147,9 @@ IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
HEX);
IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
HEX);
+IEEE80211_IF_FILE(flags, flags, HEX);
+IEEE80211_IF_FILE(state, state, LHEX);
+IEEE80211_IF_FILE(channel_type, vif.bss_conf.channel_type, DEC);
/* STA attributes */
IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
@@ -216,6 +221,104 @@ static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
__IEEE80211_IF_FILE_W(smps);
+static ssize_t ieee80211_if_fmt_tkip_mic_test(
+ const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+ return -EOPNOTSUPP;
+}
+
+static int hwaddr_aton(const char *txt, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ int a, b;
+
+ a = hex_to_bin(*txt++);
+ if (a < 0)
+ return -1;
+ b = hex_to_bin(*txt++);
+ if (b < 0)
+ return -1;
+ *addr++ = (a << 4) | b;
+ if (i < 5 && *txt++ != ':')
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t ieee80211_if_parse_tkip_mic_test(
+ struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ u8 addr[ETH_ALEN];
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ __le16 fc;
+
+ /*
+ * Assume a colon-delimited MAC address, with possible white space
+ * following.
+ */
+ if (buflen < 3 * ETH_ALEN - 1)
+ return -EINVAL;
+ if (hwaddr_aton(buf, addr) < 0)
+ return -EINVAL;
+
+ if (!ieee80211_sdata_running(sdata))
+ return -ENOTCONN;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
+ memset(hdr, 0, 24);
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
+ /* DA BSSID SA */
+ memcpy(hdr->addr1, addr, ETH_ALEN);
+ memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
+ break;
+ case NL80211_IFTYPE_STATION:
+ fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
+ /* BSSID SA DA */
+ if (sdata->vif.bss_conf.bssid == NULL) {
+ dev_kfree_skb(skb);
+ return -ENOTCONN;
+ }
+ memcpy(hdr->addr1, sdata->vif.bss_conf.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(hdr->addr3, addr, ETH_ALEN);
+ break;
+ default:
+ dev_kfree_skb(skb);
+ return -EOPNOTSUPP;
+ }
+ hdr->frame_control = fc;
+
+ /*
+ * Add some length to the test frame to make it look a bit more valid.
+ * The exact contents do not matter, since the recipient is required
+ * to drop this because of the Michael MIC failure.
+ */
+ memset(skb_put(skb, 50), 0, 50);
+
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;
+
+ ieee80211_tx_skb(sdata, skb);
+
+ return buflen;
+}
+
+__IEEE80211_IF_FILE_W(tkip_mic_test);
+
/* AP attributes */
IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
@@ -283,6 +386,9 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
@@ -291,22 +397,30 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(last_beacon);
DEBUGFS_ADD(ave_beacon);
DEBUGFS_ADD_MODE(smps, 0600);
+ DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
}
static void add_ap_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
DEBUGFS_ADD(num_sta_ps);
DEBUGFS_ADD(dtim_count);
DEBUGFS_ADD(num_buffered_multicast);
+ DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
}
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
@@ -316,12 +430,18 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(rc_rateidx_mask_2ghz);
DEBUGFS_ADD(rc_rateidx_mask_5ghz);
}
static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
{
+ DEBUGFS_ADD(flags);
+ DEBUGFS_ADD(state);
+ DEBUGFS_ADD(channel_type);
}
#ifdef CONFIG_MAC80211_MESH
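Exercising the new tkip_mic_test hook needs nothing beyond a write to the per-interface debugfs file; a minimal sketch (the phy0/wlan0 path and the peer address are illustrative, not fixed):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	static const char mac[] = "02:00:00:00:01:00";
	int fd = open("/sys/kernel/debug/ieee80211/phy0/"
		      "netdev:wlan0/tkip_mic_test", O_WRONLY);

	if (fd < 0)
		return 1;
	/* The handler parses the colon-delimited MAC and queues one
	 * data frame flagged IEEE80211_TX_INTFL_TKIP_MIC_FAILURE. */
	(void)write(fd, mac, sizeof(mac) - 1);
	close(fd);
	return 0;
}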
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 98d5899..78af32d 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -382,17 +382,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
- u16 *ssn)
+ u16 *ssn, u8 buf_size)
{
int ret = -EOPNOTSUPP;
might_sleep();
- trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn);
+ trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size);
if (local->ops->ampdu_action)
ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
- sta, tid, ssn);
+ sta, tid, ssn, buf_size);
trace_drv_return_int(local, ret);
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 49c8421..e5cce19 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -9,6 +9,11 @@
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
#endif
#undef TRACE_SYSTEM
@@ -38,7 +43,7 @@ static inline void trace_ ## name(proto) {}
* Tracing for driver callbacks.
*/
-TRACE_EVENT(drv_return_void,
+DECLARE_EVENT_CLASS(local_only_evt,
TP_PROTO(struct ieee80211_local *local),
TP_ARGS(local),
TP_STRUCT__entry(
@@ -50,6 +55,11 @@ TRACE_EVENT(drv_return_void,
TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
);
+DEFINE_EVENT(local_only_evt, drv_return_void,
+ TP_PROTO(struct ieee80211_local *local),
+ TP_ARGS(local)
+);
+
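The rest of this file repeats the same mechanical shrink, so it is worth seeing in miniature: one event class carries the shared body and each formerly hand-expanded TRACE_EVENT collapses to a single DEFINE_EVENT. A toy version of the pattern (hypothetical macro, plain C):

#include <stdio.h>

struct local { int id; };

/* The class macro expands the shared body once per event name... */
#define DEFINE_LOCAL_ONLY_EVT(name)				\
static void trace_##name(struct local *local)			\
{								\
	printf("%s: local%d\n", #name, local->id);		\
}

/* ...so each event definition is one line instead of ~15. */
DEFINE_LOCAL_ONLY_EVT(drv_start)
DEFINE_LOCAL_ONLY_EVT(drv_stop)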
TRACE_EVENT(drv_return_int,
TP_PROTO(struct ieee80211_local *local, int ret),
TP_ARGS(local, ret),
@@ -78,40 +88,14 @@ TRACE_EVENT(drv_return_u64,
TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret)
);
-TRACE_EVENT(drv_start,
+DEFINE_EVENT(local_only_evt, drv_start,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
-TRACE_EVENT(drv_stop,
+DEFINE_EVENT(local_only_evt, drv_stop,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(drv_add_interface,
@@ -439,40 +423,14 @@ TRACE_EVENT(drv_hw_scan,
)
);
-TRACE_EVENT(drv_sw_scan_start,
+DEFINE_EVENT(local_only_evt, drv_sw_scan_start,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
-TRACE_EVENT(drv_sw_scan_complete,
+DEFINE_EVENT(local_only_evt, drv_sw_scan_complete,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(drv_get_stats,
@@ -702,23 +660,9 @@ TRACE_EVENT(drv_conf_tx,
)
);
-TRACE_EVENT(drv_get_tsf,
+DEFINE_EVENT(local_only_evt, drv_get_tsf,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT,
- LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(drv_set_tsf,
@@ -742,41 +686,14 @@ TRACE_EVENT(drv_set_tsf,
)
);
-TRACE_EVENT(drv_reset_tsf,
+DEFINE_EVENT(local_only_evt, drv_reset_tsf,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
-TRACE_EVENT(drv_tx_last_beacon,
+DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT,
- LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(drv_ampdu_action,
@@ -784,9 +701,9 @@ TRACE_EVENT(drv_ampdu_action,
struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
- u16 *ssn),
+ u16 *ssn, u8 buf_size),
- TP_ARGS(local, sdata, action, sta, tid, ssn),
+ TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -794,6 +711,7 @@ TRACE_EVENT(drv_ampdu_action,
__field(u32, action)
__field(u16, tid)
__field(u16, ssn)
+ __field(u8, buf_size)
VIF_ENTRY
),
@@ -804,11 +722,13 @@ TRACE_EVENT(drv_ampdu_action,
__entry->action = action;
__entry->tid = tid;
__entry->ssn = ssn ? *ssn : 0;
+ __entry->buf_size = buf_size;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
+ __entry->tid, __entry->buf_size
)
);
@@ -959,22 +879,9 @@ TRACE_EVENT(drv_remain_on_channel,
)
);
-TRACE_EVENT(drv_cancel_remain_on_channel,
+DEFINE_EVENT(local_only_evt, drv_cancel_remain_on_channel,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
/*
@@ -1069,23 +976,9 @@ TRACE_EVENT(api_stop_tx_ba_cb,
)
);
-TRACE_EVENT(api_restart_hw,
+DEFINE_EVENT(local_only_evt, api_restart_hw,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT,
- LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
TRACE_EVENT(api_beacon_loss,
@@ -1214,40 +1107,14 @@ TRACE_EVENT(api_chswitch_done,
)
);
-TRACE_EVENT(api_ready_on_channel,
+DEFINE_EVENT(local_only_evt, api_ready_on_channel,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
-TRACE_EVENT(api_remain_on_channel_expired,
+DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired,
TP_PROTO(struct ieee80211_local *local),
-
- TP_ARGS(local),
-
- TP_STRUCT__entry(
- LOCAL_ENTRY
- ),
-
- TP_fast_assign(
- LOCAL_ASSIGN;
- ),
-
- TP_printk(
- LOCAL_PR_FMT, LOCAL_PR_ARG
- )
+ TP_ARGS(local)
);
/*
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 53c7077..775fb63 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -270,7 +270,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
enum ieee80211_band band = rx_status->band;
if (elems->ds_params && elems->ds_params_len == 1)
- freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
+ freq = ieee80211_channel_to_frequency(elems->ds_params[0],
+ band);
else
freq = rx_status->freq;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 533fd32..f2ef15d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -225,6 +225,7 @@ struct ieee80211_if_ap {
struct sk_buff_head ps_bc_buf;
atomic_t num_sta_ps; /* number of stations in PS mode */
int dtim_count;
+ bool dtim_bc_mc;
};
struct ieee80211_if_wds {
@@ -654,8 +655,6 @@ struct tpt_led_trigger {
* well be on the operating channel
* @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
* determine if we are on the operating channel or not
- * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
- * gets only set in conjunction with SCAN_SW_SCANNING
* @SCAN_COMPLETED: Set for our scan work function when the driver reported
* that the scan completed.
* @SCAN_ABORTED: Set for our scan work function when the driver reported
@@ -664,7 +663,6 @@ struct tpt_led_trigger {
enum {
SCAN_SW_SCANNING,
SCAN_HW_SCANNING,
- SCAN_OFF_CHANNEL,
SCAN_COMPLETED,
SCAN_ABORTED,
};
@@ -1147,10 +1145,14 @@ void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss);
/* off-channel helpers */
-void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
-void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
+bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
+void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+ bool tell_ap);
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+ bool offchannel_ps_enable);
void ieee80211_offchannel_return(struct ieee80211_local *local,
- bool enable_beaconing);
+ bool enable_beaconing,
+ bool offchannel_ps_disable);
void ieee80211_hw_roc_setup(struct ieee80211_local *local);
/* interface handling */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8acba45..5a4e19b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -382,6 +382,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, *tmp;
u32 hw_reconf_flags = 0;
int i;
+ enum nl80211_channel_type orig_ct;
if (local->scan_sdata == sdata)
ieee80211_scan_cancel(local);
@@ -542,8 +543,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
hw_reconf_flags = 0;
}
+ /* Re-calculate channel-type, in case there are multiple vifs
+ * on different channel types.
+ */
+ orig_ct = local->_oper_channel_type;
+ ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT);
+
/* do after stop to avoid reconfiguring when we stop anyway */
- if (hw_reconf_flags)
+ if (hw_reconf_flags || (orig_ct != local->_oper_channel_type))
ieee80211_hw_config(local, hw_reconf_flags);
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index a46ff06..c155c0b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -98,6 +98,41 @@ static void ieee80211_reconfig_filter(struct work_struct *work)
ieee80211_configure_filter(local);
}
+/*
+ * Returns true if we are logically configured to be on
+ * the operating channel AND the hardware-conf is currently
+ * configured on the operating channel. Compares channel-type
+ * as well.
+ */
+bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
+{
+ struct ieee80211_channel *chan, *scan_chan;
+ enum nl80211_channel_type channel_type;
+
+ /* This logic needs to match logic in ieee80211_hw_config */
+ if (local->scan_channel) {
+ chan = local->scan_channel;
+ channel_type = NL80211_CHAN_NO_HT;
+ } else if (local->tmp_channel) {
+ chan = scan_chan = local->tmp_channel;
+ channel_type = local->tmp_channel_type;
+ } else {
+ chan = local->oper_channel;
+ channel_type = local->_oper_channel_type;
+ }
+
+ if (chan != local->oper_channel ||
+ channel_type != local->_oper_channel_type)
+ return false;
+
+ /* Check current hardware-config against oper_channel. */
+ if ((local->oper_channel != local->hw.conf.channel) ||
+ (local->_oper_channel_type != local->hw.conf.channel_type))
+ return false;
+
+ return true;
+}
+
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
{
struct ieee80211_channel *chan, *scan_chan;
@@ -110,21 +145,27 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
scan_chan = local->scan_channel;
+ /* If this off-channel logic ever changes, ieee80211_on_oper_channel
+ * may need to change as well.
+ */
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
if (scan_chan) {
chan = scan_chan;
channel_type = NL80211_CHAN_NO_HT;
- local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
- } else if (local->tmp_channel &&
- local->oper_channel != local->tmp_channel) {
+ } else if (local->tmp_channel) {
chan = scan_chan = local->tmp_channel;
channel_type = local->tmp_channel_type;
- local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
} else {
chan = local->oper_channel;
channel_type = local->_oper_channel_type;
- local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
}
+
+ if (chan != local->oper_channel ||
+ channel_type != local->_oper_channel_type)
+ local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
+ else
+ local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
+
offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
if (offchannel_flag || chan != local->hw.conf.channel ||
@@ -231,7 +272,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (local->quiescing || !ieee80211_sdata_running(sdata) ||
- test_bit(SCAN_SW_SCANNING, &local->scanning)) {
+ test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
sdata->vif.bss_conf.enable_beacon = false;
} else {
/*
@@ -554,6 +595,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.queues = 1;
local->hw.max_rates = 1;
local->hw.max_report_rates = 0;
+ local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
local->user_power_level = -1;
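The new helper exists so callers can skip redundant retunes; a minimal usage sketch, mirroring how the scan code below consults it (illustrative, not an excerpt from the patch):

	/* Only touch the hardware when we are genuinely off-channel. */
	if (!ieee80211_cfg_on_oper_channel(local))
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);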
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index ca3af46..2a57cc0 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -574,7 +574,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
&elems);
if (elems.ds_params && elems.ds_params_len == 1)
- freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+ freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
else
freq = rx_status->freq;
@@ -645,7 +645,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
mesh_mpath_table_grow();
- if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
+ if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
mesh_mpp_table_grow();
if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
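This hunk is one instance of a change repeated throughout the series: ieee80211_channel_to_frequency() now takes the band, because a bare channel number is ambiguous across bands. A rough sketch of the mapping, assuming the era's enum ieee80211_band values and ignoring the 4.9 GHz special cases (illustrative, not the in-tree helper):

	static int chan_to_freq_sketch(int chan, enum ieee80211_band band)
	{
		if (band == IEEE80211_BAND_5GHZ)
			return 5000 + chan * 5;	/* e.g. channel 36 -> 5180 MHz */
		if (chan == 14)
			return 2484;		/* 2.4 GHz oddball channel */
		return 2407 + chan * 5;		/* e.g. channel 1 -> 2412 MHz */
	}

A DS Parameter Set advertising channel 8, for instance, means 2447 MHz when the frame was received on 2.4 GHz but something else entirely under a 5 GHz interpretation, hence the extra argument.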
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 45fbb9e..f77adf1 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -28,8 +28,15 @@
#include "rate.h"
#include "led.h"
-#define IEEE80211_MAX_NULLFUNC_TRIES 2
-#define IEEE80211_MAX_PROBE_TRIES 5
+static int max_nullfunc_tries = 2;
+module_param(max_nullfunc_tries, int, 0644);
+MODULE_PARM_DESC(max_nullfunc_tries,
+ "Maximum nullfunc tx tries before disconnecting (reason 4).");
+
+static int max_probe_tries = 5;
+module_param(max_probe_tries, int, 0644);
+MODULE_PARM_DESC(max_probe_tries,
+ "Maximum probe tries before disconnecting (reason 4).");
/*
* Beacon loss timeout is calculated as N frames times the
@@ -51,7 +58,11 @@
* a probe request because of beacon loss or for
* checking the connection still works.
*/
-#define IEEE80211_PROBE_WAIT (HZ / 2)
+static int probe_wait_ms = 500;
+module_param(probe_wait_ms, int, 0644);
+MODULE_PARM_DESC(probe_wait_ms,
+ "Maximum time(ms) to wait for probe response"
+ " before disconnecting (reason 4).");
/*
* Weight given to the latest Beacon frame when calculating average signal
@@ -161,6 +172,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband;
struct sta_info *sta;
u32 changed = 0;
+ int hti_cfreq;
u16 ht_opmode;
bool enable_ht = true;
enum nl80211_channel_type prev_chantype;
@@ -174,10 +186,27 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
if (!sband->ht_cap.ht_supported)
enable_ht = false;
- /* check that channel matches the right operating channel */
- if (local->hw.conf.channel->center_freq !=
- ieee80211_channel_to_frequency(hti->control_chan))
- enable_ht = false;
+ if (enable_ht) {
+ hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan,
+ sband->band);
+ /* check that channel matches the right operating channel */
+ if (local->hw.conf.channel->center_freq != hti_cfreq) {
+ /* Some APs mess this up, evidently.
+ * Netgear WNDR3700 sometimes reports 4 higher than
+ * the actual channel, for instance.
+ */
+ printk(KERN_DEBUG
+ "%s: Wrong control channel in association"
+ " response: configured center-freq: %d"
+ " hti-cfreq: %d hti->control_chan: %d"
+ " band: %d. Disabling HT.\n",
+ sdata->name,
+ local->hw.conf.channel->center_freq,
+ hti_cfreq, hti->control_chan,
+ sband->band);
+ enable_ht = false;
+ }
+ }
if (enable_ht) {
channel_type = NL80211_CHAN_HT20;
@@ -429,7 +458,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
container_of((void *)bss, struct cfg80211_bss, priv);
struct ieee80211_channel *new_ch;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
+ int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num,
+ cbss->channel->band);
ASSERT_MGD_MTX(ifmgd);
@@ -600,6 +630,14 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ /* If an AP vif is found, then disable PS
+ * by setting the count to zero, thereby setting
+ * ps_sdata to NULL.
+ */
+ count = 0;
+ break;
+ }
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
found = sdata;
@@ -1089,7 +1127,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const u8 *ssid;
u8 *dst = ifmgd->associated->bssid;
- u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3);
+ u8 unicast_limit = max(1, max_probe_tries - 3);
/*
* Try sending broadcast probe requests for the last three
@@ -1115,7 +1153,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
}
ifmgd->probe_send_count++;
- ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
+ ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
run_again(ifmgd, ifmgd->probe_timeout);
}
@@ -1216,7 +1254,8 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
- printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
+ printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
+ sdata->name, bssid);
ieee80211_set_disassoc(sdata, true, true);
mutex_unlock(&ifmgd->mtx);
@@ -1519,7 +1558,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
}
if (elems->ds_params && elems->ds_params_len == 1)
- freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
+ freq = ieee80211_channel_to_frequency(elems->ds_params[0],
+ rx_status->band);
else
freq = rx_status->freq;
@@ -1960,9 +2000,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
- max_tries = IEEE80211_MAX_NULLFUNC_TRIES;
+ max_tries = max_nullfunc_tries;
else
- max_tries = IEEE80211_MAX_PROBE_TRIES;
+ max_tries = max_probe_tries;
/* ACK received for nullfunc probing frame */
if (!ifmgd->probe_send_count)
@@ -1972,9 +2012,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
wiphy_debug(local->hw.wiphy,
"%s: No ack for nullfunc frame to"
- " AP %pM, try %d\n",
+ " AP %pM, try %d/%i\n",
sdata->name, bssid,
- ifmgd->probe_send_count);
+ ifmgd->probe_send_count, max_tries);
#endif
ieee80211_mgd_probe_ap_send(sdata);
} else {
@@ -1994,17 +2034,17 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
"%s: Failed to send nullfunc to AP %pM"
" after %dms, disconnecting.\n",
sdata->name,
- bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+ bssid, probe_wait_ms);
#endif
ieee80211_sta_connection_lost(sdata, bssid);
} else if (ifmgd->probe_send_count < max_tries) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
wiphy_debug(local->hw.wiphy,
"%s: No probe response from AP %pM"
- " after %dms, try %d\n",
+ " after %dms, try %d/%i\n",
sdata->name,
- bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ,
- ifmgd->probe_send_count);
+ bssid, probe_wait_ms,
+ ifmgd->probe_send_count, max_tries);
#endif
ieee80211_mgd_probe_ap_send(sdata);
} else {
@@ -2016,7 +2056,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
"%s: No probe response from AP %pM"
" after %dms, disconnecting.\n",
sdata->name,
- bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+ bssid, probe_wait_ms);
ieee80211_sta_connection_lost(sdata, bssid);
}
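Since all three new parameters are declared with mode 0644, they can also be inspected and tuned at runtime through /sys/module/mac80211/parameters/ (max_nullfunc_tries, max_probe_tries, probe_wait_ms) on sysfs-enabled builds, not only at module load time.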
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index b4e5267..13427b1 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -17,10 +17,14 @@
#include "driver-trace.h"
/*
- * inform AP that we will go to sleep so that it will buffer the frames
- * while we scan
+ * Tell our hardware to disable PS.
+ * Optionally inform AP that we will go to sleep so that it will buffer
+ * the frames while we are doing off-channel work. This is optional
+ * because we *may* be doing work on the operating channel, and want our
+ * hardware unconditionally awake, but still let the AP send us normal frames.
*/
-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
+ bool tell_ap)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -41,8 +45,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- if (!(local->offchannel_ps_enabled) ||
- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ if (tell_ap && (!local->offchannel_ps_enabled ||
+ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
@@ -77,6 +81,9 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
* we are sleeping, let's just enable power save mode in
* hardware.
*/
+ /* TODO: Only set hardware if CONF_PS changed?
+ * TODO: Should we set offchannel_ps_enabled to false?
+ */
local->hw.conf.flags |= IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
} else if (local->hw.conf.dynamic_ps_timeout > 0) {
@@ -95,63 +102,61 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_reset_conn_monitor(sdata);
}
-void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
+void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
+ bool offchannel_ps_enable)
{
struct ieee80211_sub_if_data *sdata;
+ /*
+ * notify the AP about us leaving the channel and stop all
+ * STA interfaces.
+ */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
- /* disable beaconing */
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+
+ /* Check to see if we should disable beaconing. */
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_ADHOC ||
sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
ieee80211_bss_info_change_notify(
sdata, BSS_CHANGED_BEACON_ENABLED);
- /*
- * only handle non-STA interfaces here, STA interfaces
- * are handled in ieee80211_offchannel_stop_station(),
- * e.g., from the background scan state machine.
- *
- * In addition, do not stop monitor interface to allow it to be
- * used from user space controlled off-channel operations.
- */
- if (sdata->vif.type != NL80211_IFTYPE_STATION &&
- sdata->vif.type != NL80211_IFTYPE_MONITOR) {
- set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
netif_tx_stop_all_queues(sdata->dev);
+ if (offchannel_ps_enable &&
+ (sdata->vif.type == NL80211_IFTYPE_STATION) &&
+ sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_enable(sdata, true);
}
}
mutex_unlock(&local->iflist_mtx);
}
-void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
+void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
+ bool tell_ap)
{
struct ieee80211_sub_if_data *sdata;
- /*
- * notify the AP about us leaving the channel and stop all STA interfaces
- */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
- netif_tx_stop_all_queues(sdata->dev);
- if (sdata->u.mgd.associated)
- ieee80211_offchannel_ps_enable(sdata);
- }
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_enable(sdata, tell_ap);
}
mutex_unlock(&local->iflist_mtx);
}
void ieee80211_offchannel_return(struct ieee80211_local *local,
- bool enable_beaconing)
+ bool enable_beaconing,
+ bool offchannel_ps_disable)
{
struct ieee80211_sub_if_data *sdata;
@@ -161,7 +166,8 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
continue;
/* Tell AP we're back */
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ if (offchannel_ps_disable &&
+ sdata->vif.type == NL80211_IFTYPE_STATION) {
if (sdata->u.mgd.associated)
ieee80211_offchannel_ps_disable(sdata);
}
@@ -181,7 +187,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
netif_tx_wake_all_queues(sdata->dev);
}
- /* re-enable beaconing */
+ /* Check to see if we should re-enable beaconing */
if (enable_beaconing &&
(sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_ADHOC ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a6701ed..045b2fe 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -85,6 +85,9 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
if (len & 1) /* padding for RX_FLAGS if necessary */
len++;
+ if (status->flag & RX_FLAG_HT) /* HT info */
+ len += 3;
+
return len;
}
@@ -139,11 +142,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_RATE */
if (status->flag & RX_FLAG_HT) {
/*
- * TODO: add following information into radiotap header once
- * suitable fields are defined for it:
- * - MCS index (status->rate_idx)
- * - HT40 (status->flag & RX_FLAG_40MHZ)
- * - short-GI (status->flag & RX_FLAG_SHORT_GI)
+ * MCS information is a separate field in radiotap,
+ * added below.
*/
*pos = 0;
} else {
@@ -193,6 +193,20 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
put_unaligned_le16(rx_flags, pos);
pos += 2;
+
+ if (status->flag & RX_FLAG_HT) {
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
+ *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_BW;
+ *pos = 0;
+ if (status->flag & RX_FLAG_SHORT_GI)
+ *pos |= IEEE80211_RADIOTAP_MCS_SGI;
+ if (status->flag & RX_FLAG_40MHZ)
+ *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
+ pos++;
+ *pos++ = status->rate_idx;
+ }
}
/*
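The radiotap MCS field added above is three bytes: known, flags, index. For an MCS 7 frame received HT40 with a short guard interval, the code emits (constant names abbreviated from IEEE80211_RADIOTAP_MCS_*):

	known = HAVE_MCS | HAVE_GI | HAVE_BW;	/* byte 1 */
	flags = SGI | BW_40;			/* byte 2 */
	index = 7;				/* byte 3: status->rate_idx */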
@@ -392,16 +406,10 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN)))
return RX_CONTINUE;
- if (test_bit(SCAN_HW_SCANNING, &local->scanning))
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+ test_bit(SCAN_SW_SCANNING, &local->scanning))
return ieee80211_scan_rx(rx->sdata, skb);
- if (test_bit(SCAN_SW_SCANNING, &local->scanning)) {
- /* drop all the other packets during a software scan anyway */
- if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
- dev_kfree_skb(skb);
- return RX_QUEUED;
- }
-
/* scanning finished during invoking of handlers */
I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
return RX_DROP_UNUSABLE;
@@ -798,7 +806,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
rx->local->dot11FrameDuplicateCount++;
rx->sta->num_duplicates++;
}
- return RX_DROP_MONITOR;
+ return RX_DROP_UNUSABLE;
} else
rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl;
}
@@ -1088,7 +1096,8 @@ static void ap_sta_ps_start(struct sta_info *sta)
atomic_inc(&sdata->bss->num_sta_ps);
set_sta_flags(sta, WLAN_STA_PS_STA);
- drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
+ if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+ drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1117,6 +1126,27 @@ static void ap_sta_ps_end(struct sta_info *sta)
ieee80211_sta_ps_deliver_wakeup(sta);
}
+int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+{
+ struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+ bool in_ps;
+
+ WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
+
+ /* Don't let the same PS state be set twice */
+ in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
+ if ((start && in_ps) || (!start && !in_ps))
+ return -EINVAL;
+
+ if (start)
+ ap_sta_ps_start(sta_inf);
+ else
+ ap_sta_ps_end(sta_inf);
+
+ return 0;
+}
+EXPORT_SYMBOL(ieee80211_sta_ps_transition);
+
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
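Drivers that advertise IEEE80211_HW_AP_LINK_PS are expected to call the new export from their RX path instead of letting mac80211 infer PS from frames. A hypothetical driver-side sketch, with invented event names:

	/* Hypothetical: firmware reported a station's PS state change. */
	if (evt->ps_changed) {
		/* -EINVAL means that state was already set; safe to ignore */
		if (ieee80211_sta_ps_transition(evt->sta, evt->sleeping))
			pr_debug("duplicate PS transition ignored\n");
	}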
@@ -1161,7 +1191,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
* Change STA power saving mode only at the end of a frame
* exchange sequence.
*/
- if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
@@ -1556,17 +1587,36 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ bool check_port_control = false;
+ struct ethhdr *ehdr;
+ int ret;
if (ieee80211_has_a4(hdr->frame_control) &&
sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
return -1;
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
+
+ if (!sdata->u.mgd.use_4addr)
+ return -1;
+ else
+ check_port_control = true;
+ }
+
if (is_multicast_ether_addr(hdr->addr1) &&
- ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) ||
- (sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
+ return -1;
+
+ ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
+ if (ret < 0 || !check_port_control)
+ return ret;
+
+ ehdr = (struct ethhdr *) rx->skb->data;
+ if (ehdr->h_proto != rx->sdata->control_port_protocol)
return -1;
- return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
+ return 0;
}
/*
@@ -1893,7 +1943,10 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
dev->stats.rx_bytes += rx->skb->len;
if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
- !is_multicast_ether_addr(((struct ethhdr *)rx->skb->data)->h_dest)) {
+ !is_multicast_ether_addr(
+ ((struct ethhdr *)rx->skb->data)->h_dest) &&
+ (!local->scanning &&
+ !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
@@ -2590,7 +2643,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
return 0;
if (!multicast &&
compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
- if (!(sdata->dev->flags & IFF_PROMISC))
+ if (!(sdata->dev->flags & IFF_PROMISC) ||
+ sdata->u.mgd.use_4addr)
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
@@ -2639,7 +2693,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
return 0;
} else if (!ieee80211_bssid_match(bssid,
sdata->vif.addr)) {
- if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
+ if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
+ !ieee80211_is_beacon(hdr->frame_control))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
@@ -2692,7 +2747,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
if (!skb) {
if (net_ratelimit())
wiphy_debug(local->hw.wiphy,
- "failed to copy multicast frame for %s\n",
+ "failed to copy skb for %s\n",
sdata->name);
return true;
}
@@ -2730,7 +2785,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
local->dot11ReceivedFragmentCount++;
if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
+ test_bit(SCAN_SW_SCANNING, &local->scanning)))
status->rx_flags |= IEEE80211_RX_IN_SCAN;
if (ieee80211_is_mgmt(fc))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index fb274db..0ea6ada 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -196,7 +196,8 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
if (elems.ds_params && elems.ds_params_len == 1)
- freq = ieee80211_channel_to_frequency(elems.ds_params[0]);
+ freq = ieee80211_channel_to_frequency(elems.ds_params[0],
+ rx_status->band);
else
freq = rx_status->freq;
@@ -211,6 +212,14 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
if (bss)
ieee80211_rx_bss_put(sdata->local, bss);
+ /* If we are on-operating-channel, and this packet is for the
+ * current channel, pass the packet up the stack so that
+ * the rest of the stack can make use of it.
+ */
+ if (ieee80211_cfg_on_oper_channel(sdata->local) &&
+ (channel == sdata->local->oper_channel))
+ return RX_CONTINUE;
+
dev_kfree_skb(skb);
return RX_QUEUED;
}
@@ -292,15 +301,31 @@ static void __ieee80211_scan_completed_finish(struct ieee80211_hw *hw,
bool was_hw_scan)
{
struct ieee80211_local *local = hw_to_local(hw);
+ bool on_oper_chan;
+ bool enable_beacons = false;
+
+ mutex_lock(&local->mtx);
+ on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+
+ if (was_hw_scan || !on_oper_chan) {
+ if (WARN_ON(local->scan_channel))
+ local->scan_channel = NULL;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ }
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
if (!was_hw_scan) {
+ bool on_oper_chan2;
ieee80211_configure_filter(local);
drv_sw_scan_complete(local);
- ieee80211_offchannel_return(local, true);
+ on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+ /* We should always be on-channel at this point. */
+ WARN_ON(!on_oper_chan2);
+ if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
+ enable_beacons = true;
+
+ ieee80211_offchannel_return(local, enable_beacons, true);
}
- mutex_lock(&local->mtx);
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
@@ -340,13 +365,15 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
*/
drv_sw_scan_start(local);
- ieee80211_offchannel_stop_beaconing(local);
-
local->leave_oper_channel_time = 0;
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
- drv_flush(local, false);
+ /* We always want to use off-channel PS, even if we
+ * are not really leaving oper-channel. Don't
+ * tell the AP though, as long as we are on-channel.
+ */
+ ieee80211_offchannel_enable_all_ps(local, false);
ieee80211_configure_filter(local);
@@ -486,7 +513,21 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
}
mutex_unlock(&local->iflist_mtx);
- if (local->scan_channel) {
+ next_chan = local->scan_req->channels[local->scan_channel_idx];
+
+ if (ieee80211_cfg_on_oper_channel(local)) {
+ /* We're currently on operating channel. */
+ if ((next_chan == local->oper_channel) &&
+ (local->_oper_channel_type == NL80211_CHAN_NO_HT))
+ /* We don't need to move off of operating channel. */
+ local->next_scan_state = SCAN_SET_CHANNEL;
+ else
+ /*
+ * We do need to leave operating channel, as next
+ * scan is somewhere else.
+ */
+ local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
+ } else {
/*
* we're currently scanning a different channel, let's
* see if we can scan another channel without interfering
@@ -502,7 +543,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
*
* Otherwise switch back to the operating channel.
*/
- next_chan = local->scan_req->channels[local->scan_channel_idx];
bad_latency = time_after(jiffies +
ieee80211_scan_get_channel_time(next_chan),
@@ -520,12 +560,6 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
else
local->next_scan_state = SCAN_SET_CHANNEL;
- } else {
- /*
- * we're on the operating channel currently, let's
- * leave that channel now to scan another one
- */
- local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
}
*next_delay = 0;
@@ -534,9 +568,10 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
- ieee80211_offchannel_stop_station(local);
-
- __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
+ /* PS will already be in off-channel mode,
+ * we do that once at the beginning of scanning.
+ */
+ ieee80211_offchannel_stop_vifs(local, false);
/*
* What if the nullfunc frames didn't arrive?
@@ -559,15 +594,15 @@ static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *loca
{
/* switch back to the operating channel */
local->scan_channel = NULL;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+ if (!ieee80211_cfg_on_oper_channel(local))
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/*
- * Only re-enable station mode interface now; beaconing will be
- * re-enabled once the full scan has been completed.
+ * Re-enable vifs and beaconing. Leave PS
+ * in off-channel state; we will put that back
+ * on-channel at the end of scanning.
*/
- ieee80211_offchannel_return(local, false);
-
- __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
+ ieee80211_offchannel_return(local, true, false);
*next_delay = HZ / 5;
local->next_scan_state = SCAN_DECISION;
@@ -583,8 +618,12 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
chan = local->scan_req->channels[local->scan_channel_idx];
local->scan_channel = chan;
- if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
- skip = 1;
+
+ /* Only call hw-config if we really need to change channels. */
+ if ((chan != local->hw.conf.channel) ||
+ (local->hw.conf.channel_type != NL80211_CHAN_NO_HT))
+ if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
+ skip = 1;
/* advance state machine to next channel/band */
local->scan_channel_idx++;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index c426504..5a11078 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -899,7 +899,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
int sent, buffered;
- drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
+ if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
+ drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
if (!skb_queue_empty(&sta->ps_tx_buf))
sta_info_clear_tim_bit(sta);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index bbdd2a8..ca0b690 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -82,6 +82,7 @@ enum ieee80211_sta_info_flags {
* @state: session state (see above)
* @stop_initiator: initiator of a session stop
* @tx_stop: TX DelBA frame when stopping
+ * @buf_size: reorder buffer size at receiver
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -101,6 +102,7 @@ struct tid_ampdu_tx {
u8 dialog_token;
u8 stop_initiator;
bool tx_stop;
+ u8 buf_size;
};
/**
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 071ac95..010a559 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -98,6 +98,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
* (b) always process RX events before TX status events if ordering
* can be unknown, for example with different interrupt status
* bits.
+ * (c) if PS mode transitions are manual (i.e. the flag
+ * %IEEE80211_HW_AP_LINK_PS is set), always process PS state
+ * changes before calling TX status events if ordering can be
+ * unknown.
*/
if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index b0beaa5..17ef4f4 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -257,7 +257,8 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
return TX_CONTINUE;
- if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
+ if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) &&
+ test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) &&
!ieee80211_is_probe_req(hdr->frame_control) &&
!ieee80211_is_nullfunc(hdr->frame_control))
/*
@@ -1394,7 +1395,8 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
/* handlers after fragment must be aware of tx info fragmentation! */
CALL_TXH(ieee80211_tx_h_stats);
CALL_TXH(ieee80211_tx_h_encrypt);
- CALL_TXH(ieee80211_tx_h_calculate_duration);
+ if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
+ CALL_TXH(ieee80211_tx_h_calculate_duration);
#undef CALL_TXH
txh_done:
@@ -1750,7 +1752,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
__le16 fc;
struct ieee80211_hdr hdr;
struct ieee80211s_hdr mesh_hdr __maybe_unused;
- struct mesh_path *mppath = NULL;
+ struct mesh_path __maybe_unused *mppath = NULL;
const u8 *encaps_data;
int encaps_len, skip_header_bytes;
int nh_pos, h_pos;
@@ -1815,19 +1817,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
mppath = mpp_path_lookup(skb->data, sdata);
/*
- * Do not use address extension, if it is a packet from
- * the same interface and the destination is not being
- * proxied by any other mest point.
+ * Use address extension if it is a packet from
+ * another interface or if we know the destination
+ * is being proxied by a portal (i.e. portal address
+ * differs from proxied address)
*/
if (compare_ether_addr(sdata->vif.addr,
skb->data + ETH_ALEN) == 0 &&
- (!mppath || !compare_ether_addr(mppath->mpp, skb->data))) {
+ !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
skb->data, skb->data + ETH_ALEN);
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
sdata, NULL, NULL);
} else {
- /* packet from other interface */
int is_mesh_mcast = 1;
const u8 *mesh_da;
@@ -2178,6 +2180,8 @@ static void ieee80211_beacon_add_tim(struct ieee80211_if_ap *bss,
if (bss->dtim_count == 0 && !skb_queue_empty(&bss->ps_bc_buf))
aid0 = 1;
+ bss->dtim_bc_mc = aid0 == 1;
+
if (have_bits) {
/* Find largest even number N1 so that bits numbered 1 through
* (N1 x 8) - 1 in the bitmap are 0 and number N2 so that bits
@@ -2241,7 +2245,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
if (sdata->vif.type == NL80211_IFTYPE_AP) {
ap = &sdata->u.ap;
beacon = rcu_dereference(ap->beacon);
- if (ap && beacon) {
+ if (beacon) {
/*
* headroom, head length,
* tail length and maximum TIM length
@@ -2302,6 +2306,11 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_mgmt *mgmt;
u8 *pos;
+#ifdef CONFIG_MAC80211_MESH
+ if (!sdata->u.mesh.mesh_id_len)
+ goto out;
+#endif
+
/* headroom, head length, tail length and maximum TIM length */
skb = dev_alloc_skb(local->tx_headroom + 400 +
sdata->u.mesh.vendor_ie_len);
@@ -2543,7 +2552,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
if (sdata->vif.type != NL80211_IFTYPE_AP || !beacon || !beacon->head)
goto out;
- if (bss->dtim_count != 0)
+ if (bss->dtim_count != 0 || !bss->dtim_bc_mc)
goto out; /* send buffered bc/mc only after DTIM beacon */
while (1) {
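The two tx.c hunks pair up: ieee80211_beacon_add_tim() records whether the DTIM beacon it just built actually advertised buffered traffic (dtim_bc_mc = aid0 == 1), and ieee80211_get_buffered_bc() now releases broadcast/multicast frames only in that case, instead of on every beacon with dtim_count == 0.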
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 36305e0..6bf787a 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -924,18 +924,44 @@ static void ieee80211_work_work(struct work_struct *work)
}
if (!started && !local->tmp_channel) {
- /*
- * TODO: could optimize this by leaving the
- * station vifs in awake mode if they
- * happen to be on the same channel as
- * the requested channel
- */
- ieee80211_offchannel_stop_beaconing(local);
- ieee80211_offchannel_stop_station(local);
+ bool on_oper_chan;
+ bool tmp_chan_changed = false;
+ bool on_oper_chan2;
+ on_oper_chan = ieee80211_cfg_on_oper_channel(local);
+ if (local->tmp_channel)
+ if ((local->tmp_channel != wk->chan) ||
+ (local->tmp_channel_type != wk->chan_type))
+ tmp_chan_changed = true;
local->tmp_channel = wk->chan;
local->tmp_channel_type = wk->chan_type;
- ieee80211_hw_config(local, 0);
+ /*
+ * Leave the station vifs in awake mode if they
+ * happen to be on the same channel as
+ * the requested channel.
+ */
+ on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
+ if (on_oper_chan != on_oper_chan2) {
+ if (on_oper_chan2) {
+ /* going off oper channel, PS too */
+ ieee80211_offchannel_stop_vifs(local,
+ true);
+ ieee80211_hw_config(local, 0);
+ } else {
+ /* going on channel, but leave PS
+ * off-channel. */
+ ieee80211_hw_config(local, 0);
+ ieee80211_offchannel_return(local,
+ true,
+ false);
+ }
+ } else if (tmp_chan_changed)
+ /* Still off-channel, but on some other
+ * channel, so update hardware.
+ * PS should already be off-channel.
+ */
+ ieee80211_hw_config(local, 0);
+
started = true;
wk->timeout = jiffies;
}
@@ -1011,9 +1037,27 @@ static void ieee80211_work_work(struct work_struct *work)
}
if (!remain_off_channel && local->tmp_channel) {
+ bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
local->tmp_channel = NULL;
- ieee80211_hw_config(local, 0);
- ieee80211_offchannel_return(local, true);
+ /* If tmp_channel wasn't operating channel, then
+ * we need to go back on-channel.
+ * NOTE: If we can ever be here while scanning,
+ * or if the hw_config() channel config logic changes,
+ * then we may need to do a more thorough check to see if
+ * we still need to do a hardware config. Currently,
+ * we cannot be here while scanning, however.
+ */
+ if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan)
+ ieee80211_hw_config(local, 0);
+
+ /* At the least, we need to disable offchannel_ps,
+ * so just go ahead and run the entire offchannel
+ * return logic here. We *could* skip enabling
+ * beaconing if we were already on-oper-channel
+ * as a future optimization.
+ */
+ ieee80211_offchannel_return(local, true, true);
+
/* give connection some time to breathe */
run_again(local, jiffies + HZ/2);
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index bee230d..f1765de 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -26,13 +26,12 @@
ieee80211_tx_result
ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
{
- u8 *data, *key, *mic, key_offset;
+ u8 *data, *key, *mic;
size_t data_len;
unsigned int hdrlen;
struct ieee80211_hdr *hdr;
struct sk_buff *skb = tx->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int authenticator;
int tail;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -47,6 +46,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
data = skb->data + hdrlen;
data_len = skb->len - hdrlen;
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) {
+ /* Need to use software crypto for the test */
+ info->control.hw_key = NULL;
+ }
+
if (info->control.hw_key &&
!(tx->flags & IEEE80211_TX_FRAGMENTED) &&
!(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
@@ -62,17 +66,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
skb_headroom(skb) < TKIP_IV_LEN))
return TX_DROP;
-#if 0
- authenticator = fc & IEEE80211_FCTL_FROMDS; /* FIX */
-#else
- authenticator = 1;
-#endif
- key_offset = authenticator ?
- NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY :
- NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
- key = &tx->key->conf.key[key_offset];
+ key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
mic = skb_put(skb, MICHAEL_MIC_LEN);
michael_mic(key, hdr, data, data_len, mic);
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE))
+ mic[0]++;
return TX_CONTINUE;
}
@@ -81,14 +79,13 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
ieee80211_rx_result
ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
{
- u8 *data, *key = NULL, key_offset;
+ u8 *data, *key = NULL;
size_t data_len;
unsigned int hdrlen;
u8 mic[MICHAEL_MIC_LEN];
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int authenticator = 1, wpa_test = 0;
/* No way to verify the MIC if the hardware stripped it */
if (status->flag & RX_FLAG_MMIC_STRIPPED)
@@ -106,17 +103,9 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
data = skb->data + hdrlen;
data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
-#if 0
- authenticator = fc & IEEE80211_FCTL_TODS; /* FIX */
-#else
- authenticator = 1;
-#endif
- key_offset = authenticator ?
- NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY :
- NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
- key = &rx->key->conf.key[key_offset];
+ key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
michael_mic(key, hdr, data, data_len, mic);
- if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0 || wpa_test) {
+ if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) {
if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_UNUSABLE;
@@ -208,7 +197,7 @@ ieee80211_rx_result
ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
- int hdrlen, res, hwaccel = 0, wpa_test = 0;
+ int hdrlen, res, hwaccel = 0;
struct ieee80211_key *key = rx->key;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -235,7 +224,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
hdr->addr1, hwaccel, rx->queue,
&rx->tkip_iv32,
&rx->tkip_iv16);
- if (res != TKIP_DECRYPT_OK || wpa_test)
+ if (res != TKIP_DECRYPT_OK)
return RX_DROP_UNUSABLE;
/* Trim ICV */
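With the dead authenticator guesswork removed, the fixed offsets index straight into the 32-byte TKIP key blob, whose layout per the nl80211 offsets is:

	/* 32-byte TKIP key material:
	 *   bytes  0..15  encryption key     (NL80211_TKIP_DATA_OFFSET_ENCR_KEY = 0)
	 *   bytes 16..23  TX Michael MIC key (NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY = 16)
	 *   bytes 24..31  RX Michael MIC key (NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY = 24)
	 */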
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 1534f2b..82a6e0d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -85,6 +85,17 @@ config NF_CONNTRACK_EVENTS
If unsure, say `N'.
+config NF_CONNTRACK_TIMESTAMP
+ bool 'Connection tracking timestamping'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking timestamping.
+ This allows you to store the flow start-time and to obtain
+ the flow-stop time (once it has been destroyed) via Connection
+ tracking events.
+
+ If unsure, say `N'.
+
config NF_CT_PROTO_DCCP
tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)'
depends on EXPERIMENTAL
@@ -185,9 +196,13 @@ config NF_CONNTRACK_IRC
To compile it as a module, choose M here. If unsure, say N.
+config NF_CONNTRACK_BROADCAST
+ tristate
+
config NF_CONNTRACK_NETBIOS_NS
tristate "NetBIOS name service protocol support"
depends on NETFILTER_ADVANCED
+ select NF_CONNTRACK_BROADCAST
help
NetBIOS name service requests are sent as broadcast messages from an
unprivileged port and responded to with unicast messages to the
@@ -204,6 +219,21 @@ config NF_CONNTRACK_NETBIOS_NS
To compile it as a module, choose M here. If unsure, say N.
+config NF_CONNTRACK_SNMP
+ tristate "SNMP service protocol support"
+ depends on NETFILTER_ADVANCED
+ select NF_CONNTRACK_BROADCAST
+ help
+ SNMP service requests are sent as broadcast messages from an
+ unprivileged port and responded to with unicast messages to the
+ same port. This makes them hard to firewall properly because connection
+ tracking doesn't deal with broadcasts. This helper tracks locally
+ originating SNMP service requests and the corresponding
+ responses. It relies on correct IP address configuration, specifically
+ netmask and broadcast address.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NF_CONNTRACK_PPTP
tristate "PPtP protocol support"
depends on NETFILTER_ADVANCED
@@ -322,10 +352,32 @@ config NETFILTER_XT_CONNMARK
ctmark), similarly to the packet mark (nfmark). Using this
target and match, you can set and match on this mark.
+config NETFILTER_XT_SET
+ tristate 'set target and match support'
+ depends on IP_SET
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds the "SET" target and "set" match.
+
+ Using this target and match, you can add/delete and match
+ elements in the sets created by ipset(8).
+
+ To compile it as a module, choose M here. If unsure, say N.
+
# alphabetically ordered list of targets
comment "Xtables targets"
+config NETFILTER_XT_TARGET_AUDIT
+ tristate "AUDIT target support"
+ depends on AUDIT
+ depends on NETFILTER_ADVANCED
+ ---help---
+ This option adds an 'AUDIT' target, which can be used to create
+ audit records for packets dropped/accepted.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_CHECKSUM
tristate "CHECKSUM target support"
depends on IP_NF_MANGLE || IP6_NF_MANGLE
@@ -477,6 +529,7 @@ config NETFILTER_XT_TARGET_NFLOG
config NETFILTER_XT_TARGET_NFQUEUE
tristate '"NFQUEUE" target Support'
depends on NETFILTER_ADVANCED
+ select NETFILTER_NETLINK_QUEUE
help
This target replaced the old obsolete QUEUE target.
@@ -685,6 +738,15 @@ config NETFILTER_XT_MATCH_DCCP
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+config NETFILTER_XT_MATCH_DEVGROUP
+ tristate '"devgroup" match support'
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds a `devgroup' match, which allows you to match on the
+ device group a network device is assigned to.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_DSCP
tristate '"dscp" and "tos" match support'
depends on NETFILTER_ADVANCED
@@ -886,7 +948,7 @@ config NETFILTER_XT_MATCH_RATEEST
config NETFILTER_XT_MATCH_REALM
tristate '"realm" match support'
depends on NETFILTER_ADVANCED
- select NET_CLS_ROUTE
+ select IP_ROUTE_CLASSID
help
This option adds a `realm' match, which allows you to use the realm
key from the routing subsystem inside iptables.
@@ -1011,4 +1073,6 @@ endif # NETFILTER_XTABLES
endmenu
+source "net/netfilter/ipset/Kconfig"
+
source "net/netfilter/ipvs/Kconfig"
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 441050f..d57a890 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,7 @@
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -28,7 +29,9 @@ obj-$(CONFIG_NF_CONNTRACK_AMANDA) += nf_conntrack_amanda.o
obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
+obj-$(CONFIG_NF_CONNTRACK_BROADCAST) += nf_conntrack_broadcast.o
obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
+obj-$(CONFIG_NF_CONNTRACK_SNMP) += nf_conntrack_snmp.o
obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
@@ -43,8 +46,10 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
# combos
obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
+obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
# targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
@@ -72,6 +77,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
@@ -101,5 +107,8 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
+# ipset
+obj-$(CONFIG_IP_SET) += ipset/
+
# IPVS
obj-$(CONFIG_IP_VS) += ipvs/
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 4aa614b..899b71c 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -176,13 +176,21 @@ next_hook:
ret = 1;
} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
kfree_skb(skb);
- ret = -(verdict >> NF_VERDICT_BITS);
+ ret = NF_DROP_GETERR(verdict);
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
- verdict >> NF_VERDICT_BITS))
- goto next_hook;
+ ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+ verdict >> NF_VERDICT_QBITS);
+ if (ret < 0) {
+ if (ret == -ECANCELED)
+ goto next_hook;
+ if (ret == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ goto next_hook;
+ kfree_skb(skb);
+ }
+ ret = 0;
}
rcu_read_unlock();
return ret;
@@ -215,7 +223,7 @@ EXPORT_SYMBOL(skb_make_writable);
/* This does not belong here, but locally generated errors need it if connection
tracking in use: without this, connection may not be in hash table, and hence
manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
@@ -232,7 +240,7 @@ void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
}
EXPORT_SYMBOL(nf_ct_attach);
-void (*nf_ct_destroy)(struct nf_conntrack *);
+void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);
void nf_conntrack_destroy(struct nf_conntrack *nfct)
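The reworked hook return path relies on verdicts packing extra data into their upper bits. A hedged sketch of the scheme with invented names (the in-tree macros may differ in detail):

	#define SKETCH_QBITS		16
	/* NF_QUEUE carries the target queue number in the high bits... */
	#define SKETCH_QUEUE_NR(n)	((((n) << SKETCH_QBITS) & 0xffff0000) | NF_QUEUE)
	/* ...and NF_DROP can carry a negated errno there instead */
	#define SKETCH_DROP_ERR(e)	(((-(e)) << SKETCH_QBITS) | NF_DROP)
	#define SKETCH_DROP_GETERR(v)	(-((v) >> SKETCH_QBITS))

So NF_DROP_GETERR(verdict) in the hunk above recovers the errno a hook asked for, and NF_VERDICT_FLAG_QUEUE_BYPASS lets a rule fall through to the next hook rather than drop the packet when the target queue does not exist (-ESRCH).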
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
new file mode 100644
index 0000000..3b970d34
--- /dev/null
+++ b/net/netfilter/ipset/Kconfig
@@ -0,0 +1,121 @@
+menuconfig IP_SET
+ tristate "IP set support"
+ depends on INET && NETFILTER
+ help
+ This option adds IP set support to the kernel.
+ In order to define and use the sets, you need the userspace utility
+ ipset(8). You can use the sets in netfilter via the "set" match
+ and "SET" target.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+if IP_SET
+
+config IP_SET_MAX
+ int "Maximum number of IP sets"
+ default 256
+ range 2 65534
+ depends on IP_SET
+ help
+ You can define here the default value of the maximum number
+ of IP sets for the kernel.
+
+ The value can be overridden by the 'max_sets' module
+ parameter of the 'ip_set' module.
+
+config IP_SET_BITMAP_IP
+ tristate "bitmap:ip set support"
+ depends on IP_SET
+ help
+ This option adds the bitmap:ip set type support, by which one
+ can store IPv4 addresses (or network addresses) from a range.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_BITMAP_IPMAC
+ tristate "bitmap:ip,mac set support"
+ depends on IP_SET
+ help
+ This option adds the bitmap:ip,mac set type support, by which one
+ can store IPv4 address and (source) MAC address pairs from a range.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_BITMAP_PORT
+ tristate "bitmap:port set support"
+ depends on IP_SET
+ help
+ This option adds the bitmap:port set type support, by which one
+ can store TCP/UDP port numbers from a range.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IP
+ tristate "hash:ip set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip set type support, by which one
+ can store arbitrary IPv4 or IPv6 addresses (or network addresses)
+ in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IPPORT
+ tristate "hash:ip,port set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip,port set type support, by which one
+ can store IPv4/IPv6 address and protocol/port pairs.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IPPORTIP
+ tristate "hash:ip,port,ip set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip,port,ip set type support, by which
+ one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+ address triples in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_IPPORTNET
+ tristate "hash:ip,port,net set support"
+ depends on IP_SET
+ help
+ This option adds the hash:ip,port,net set type support, by which
+ one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+ network address/prefix triples in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_NET
+ tristate "hash:net set support"
+ depends on IP_SET
+ help
+ This option adds the hash:net set type support, by which
+ one can store IPv4/IPv6 network address/prefix elements in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_HASH_NETPORT
+ tristate "hash:net,port set support"
+ depends on IP_SET
+ help
+ This option adds the hash:net,port set type support, by which
+ one can store IPv4/IPv6 network address/prefix and
+ protocol/port pairs as elements in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config IP_SET_LIST_SET
+ tristate "list:set set support"
+ depends on IP_SET
+ help
+ This option adds the list:set set type support. In this
+ kind of set one can store the names of other sets and it forms
+ an ordered union of the member sets.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+endif # IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
new file mode 100644
index 0000000..5adbdab
--- /dev/null
+++ b/net/netfilter/ipset/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the ipset modules
+#
+
+ip_set-y := ip_set_core.o ip_set_getport.o pfxlen.o
+
+# ipset core
+obj-$(CONFIG_IP_SET) += ip_set.o
+
+# bitmap types
+obj-$(CONFIG_IP_SET_BITMAP_IP) += ip_set_bitmap_ip.o
+obj-$(CONFIG_IP_SET_BITMAP_IPMAC) += ip_set_bitmap_ipmac.o
+obj-$(CONFIG_IP_SET_BITMAP_PORT) += ip_set_bitmap_port.o
+
+# hash types
+obj-$(CONFIG_IP_SET_HASH_IP) += ip_set_hash_ip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORT) += ip_set_hash_ipport.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
+obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
+obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
+
+# list types
+obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
new file mode 100644
index 0000000..bca9699
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -0,0 +1,587 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip");
+
+/* Type structure */
+struct bitmap_ip {
+ void *members; /* the set members */
+ u32 first_ip; /* host byte order, included in range */
+ u32 last_ip; /* host byte order, included in range */
+ u32 elements; /* number of max elements in the set */
+ u32 hosts; /* number of hosts in a subnet */
+ size_t memsize; /* members size */
+ u8 netmask; /* subnet netmask */
+ u32 timeout; /* timeout parameter */
+ struct timer_list gc; /* garbage collection */
+};
+
+/* Base variant */
+
+static inline u32
+ip_to_id(const struct bitmap_ip *m, u32 ip)
+{
+ return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
+}
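+
+/* Worked example (illustrative): with first_ip = 10.0.0.0, netmask = 24
+ * and hence hosts = 256, the address 10.0.3.5 is masked to 10.0.3.0 and
+ * maps to id = (10.0.3.0 - 10.0.0.0) / 256 = 3.
+ */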
+
+static int
+bitmap_ip_test(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ip *map = set->data;
+ u16 id = *(u16 *)value;
+
+ return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_ip_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (test_and_set_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_ip_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (!test_and_clear_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_ip_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ip *map = set->data;
+ struct nlattr *atd, *nested;
+ u32 id, first = cb->args[2];
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] < map->elements; cb->args[2]++) {
+ id = cb->args[2];
+ if (!test_bit(id, map->members))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id * map->hosts));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Timeout variant */
+
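+/* In the timeout variant, members is not a plain bit field but an array
+ * of unsigned long expiry stamps, one per id; the ip_set_timeout_*()
+ * helpers from ip_set_timeout.h test, set and fetch them, as the
+ * routines below show.
+ */
+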
+static int
+bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ip *map = set->data;
+ const unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ if (ip_set_timeout_test(members[id]))
+ return -IPSET_ERR_EXIST;
+
+ members[id] = ip_set_timeout_set(timeout);
+
+ return 0;
+}
+
+static int
+bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ip *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+ int ret = -IPSET_ERR_EXIST;
+
+ if (ip_set_timeout_test(members[id]))
+ ret = 0;
+
+ members[id] = IPSET_ELEM_UNSET;
+ return ret;
+}
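
An editorial sketch, not part of the patch: in the timeout variant the members area is no longer a bitmap but an array of unsigned long stamps, one per element; a stamp encodes the absolute expiry time and doubles as the membership flag. A rough userspace mock, using seconds where the kernel uses jiffies and ignoring the reserved values the real ip_set_timeout_* helpers handle:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define ELEM_UNSET 0UL  /* mock of IPSET_ELEM_UNSET */

static unsigned long timeout_set(unsigned int ttl)
{
    return (unsigned long)time(NULL) + ttl; /* absolute expiry */
}

static bool timeout_test(unsigned long stamp)
{
    return stamp != ELEM_UNSET && (unsigned long)time(NULL) < stamp;
}

int main(void)
{
    unsigned long members[4] = { ELEM_UNSET };

    members[2] = timeout_set(30);                        /* add id 2 for 30s */
    printf("id 2 live: %d\n", timeout_test(members[2])); /* 1 */
    members[2] = ELEM_UNSET;                             /* delete id 2 */
    printf("id 2 live: %d\n", timeout_test(members[2])); /* 0 */
    return 0;
}
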
+
+static int
+bitmap_ip_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ip *map = set->data;
+ struct nlattr *adt, *nested;
+ u32 id, first = cb->args[2];
+ const unsigned long *members = map->members;
+
+ adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!adt)
+ return -EMSGSIZE;
+ for (; cb->args[2] < map->elements; cb->args[2]++) {
+ id = cb->args[2];
+ if (!ip_set_timeout_test(members[id]))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, adt);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id * map->hosts));
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(members[id])));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, adt);
+
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, adt);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct bitmap_ip *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 ip;
+
+ ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ ip = ip_to_id(map, ip);
+
+ return adtfn(set, &ip, map->timeout);
+}
+
+static int
+bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ struct bitmap_ip *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 timeout = map->timeout;
+ u32 ip, ip_to, id;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(map->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST) {
+ id = ip_to_id(map, ip);
+ return adtfn(set, &id, timeout);
+ }
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to) {
+ swap(ip, ip_to);
+ if (ip < map->first_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+ }
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ if (ip_to > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ for (; !before(ip_to, ip); ip += map->hosts) {
+ id = ip_to_id(map, ip);
+ ret = adtfn(set, &id, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
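
An editorial aside, not part of the patch: the add/del loop above deliberately tests !before(ip_to, ip) with before() from <net/tcp.h> instead of a plain "ip <= ip_to". With plain u32 comparison, a range ending at 255.255.255.255 would wrap to 0 on the final increment and never terminate; signed sequence-number arithmetic does terminate. A standalone check:

#include <stdint.h>
#include <stdio.h>

/* same definition as before() in <net/tcp.h> */
static int before(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;
}

int main(void)
{
    uint32_t ip = 0xFFFFFFF0, ip_to = 0xFFFFFFFF, hosts = 8;
    unsigned int n = 0;

    for (; !before(ip_to, ip); ip += hosts)
        n++;        /* ip wraps to 0 and before() turns true */
    printf("%u iterations\n", n);   /* 2 */
    return 0;
}
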
+
+static void
+bitmap_ip_destroy(struct ip_set *set)
+{
+ struct bitmap_ip *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+
+ ip_set_free(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void
+bitmap_ip_flush(struct ip_set *set)
+{
+ struct bitmap_ip *map = set->data;
+
+ memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct bitmap_ip *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+ if (map->netmask != 32)
+ NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->memsize));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static bool
+bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct bitmap_ip *x = a->data;
+ const struct bitmap_ip *y = b->data;
+
+ return x->first_ip == y->first_ip &&
+ x->last_ip == y->last_ip &&
+ x->netmask == y->netmask &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ip = {
+ .kadt = bitmap_ip_kadt,
+ .uadt = bitmap_ip_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ip_add,
+ [IPSET_DEL] = bitmap_ip_del,
+ [IPSET_TEST] = bitmap_ip_test,
+ },
+ .destroy = bitmap_ip_destroy,
+ .flush = bitmap_ip_flush,
+ .head = bitmap_ip_head,
+ .list = bitmap_ip_list,
+ .same_set = bitmap_ip_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tip = {
+ .kadt = bitmap_ip_kadt,
+ .uadt = bitmap_ip_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ip_tadd,
+ [IPSET_DEL] = bitmap_ip_tdel,
+ [IPSET_TEST] = bitmap_ip_ttest,
+ },
+ .destroy = bitmap_ip_destroy,
+ .flush = bitmap_ip_flush,
+ .head = bitmap_ip_head,
+ .list = bitmap_ip_tlist,
+ .same_set = bitmap_ip_same_set,
+};
+
+static void
+bitmap_ip_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct bitmap_ip *map = set->data;
+ unsigned long *table = map->members;
+ u32 id;
+
+ /* We run in parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ for (id = 0; id < map->elements; id++)
+ if (ip_set_timeout_expired(table[id]))
+ table[id] = IPSET_ELEM_UNSET;
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+bitmap_ip_gc_init(struct ip_set *set)
+{
+ struct bitmap_ip *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = bitmap_ip_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create bitmap:ip type of sets */
+
+static bool
+init_map_ip(struct ip_set *set, struct bitmap_ip *map,
+ u32 first_ip, u32 last_ip,
+ u32 elements, u32 hosts, u8 netmask)
+{
+ map->members = ip_set_alloc(map->memsize);
+ if (!map->members)
+ return false;
+ map->first_ip = first_ip;
+ map->last_ip = last_ip;
+ map->elements = elements;
+ map->hosts = hosts;
+ map->netmask = netmask;
+ map->timeout = IPSET_NO_TIMEOUT;
+
+ set->data = map;
+ set->family = AF_INET;
+
+ return true;
+}
+
+static int
+bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct bitmap_ip *map;
+ u32 first_ip, last_ip, hosts, elements;
+ u8 netmask = 32;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+ if (ret)
+ return ret;
+ if (first_ip > last_ip) {
+ u32 tmp = first_ip;
+
+ first_ip = last_ip;
+ last_ip = tmp;
+ }
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr >= 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ last_ip = first_ip | ~ip_set_hostmask(cidr);
+ } else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_NETMASK]) {
+ netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+ if (netmask > 32)
+ return -IPSET_ERR_INVALID_NETMASK;
+
+ first_ip &= ip_set_hostmask(netmask);
+ last_ip |= ~ip_set_hostmask(netmask);
+ }
+
+ if (netmask == 32) {
+ hosts = 1;
+ elements = last_ip - first_ip + 1;
+ } else {
+ u8 mask_bits;
+ u32 mask;
+
+ mask = range_to_mask(first_ip, last_ip, &mask_bits);
+
+ if ((!mask && (first_ip || last_ip != 0xFFFFFFFF)) ||
+ netmask <= mask_bits)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
+ hosts = 2 << (32 - netmask - 1);
+ elements = 2 << (netmask - mask_bits - 1);
+ }
+ if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+ return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+ pr_debug("hosts %u, elements %u\n", hosts, elements);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ map->memsize = elements * sizeof(unsigned long);
+
+ if (!init_map_ip(set, map, first_ip, last_ip,
+ elements, hosts, netmask)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ set->variant = &bitmap_tip;
+
+ bitmap_ip_gc_init(set);
+ } else {
+ map->memsize = bitmap_bytes(0, elements - 1);
+
+ if (!init_map_ip(set, map, first_ip, last_ip,
+ elements, hosts, netmask)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ set->variant = &bitmap_ip;
+ }
+ return 0;
+}
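
An editorial worked example, not part of the patch: the sizing logic in bitmap_ip_create() deserves a concrete case. With a netmask below /32 one bit stands for a whole subnet, so hosts is 2^(32 - netmask) addresses per bit and elements is 2^(netmask - mask_bits) bits, where mask_bits is the CIDR length covering the whole range. The "2 << (n - 1)" spelling keeps the shift count one below the exponent, which sidesteps an undefined full-width shift in the corner cases. Checked standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t mask_bits = 16; /* the range covers a whole /16 */
    uint8_t netmask = 24;   /* one bit per /24 subnet */
    uint32_t hosts = 2u << (32 - netmask - 1);           /* 2^8 = 256 */
    uint32_t elements = 2u << (netmask - mask_bits - 1); /* 2^8 = 256 */

    /* 256 subnets of 256 addresses each cover the /16 range */
    printf("hosts %u, elements %u\n", hosts, elements);
    return 0;
}
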
+
+static struct ip_set_type bitmap_ip_type __read_mostly = {
+ .name = "bitmap:ip",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_INET,
+ .revision = 0,
+ .create = bitmap_ip_create,
+ .create_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+bitmap_ip_init(void)
+{
+ return ip_set_type_register(&bitmap_ip_type);
+}
+
+static void __exit
+bitmap_ip_fini(void)
+{
+ ip_set_type_unregister(&bitmap_ip_type);
+}
+
+module_init(bitmap_ip_init);
+module_exit(bitmap_ip_fini);
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
new file mode 100644
index 0000000..5e79017
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -0,0 +1,652 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip,mac type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip,mac");
+
+enum {
+ MAC_EMPTY, /* element is not set */
+ MAC_FILLED, /* element is set with MAC */
+ MAC_UNSET, /* element is set, without MAC */
+};
+
+/* Type structure */
+struct bitmap_ipmac {
+ void *members; /* the set members */
+ u32 first_ip; /* host byte order, included in range */
+ u32 last_ip; /* host byte order, included in range */
+ u32 timeout; /* timeout value */
+ struct timer_list gc; /* garbage collector */
+ size_t dsize; /* size of element */
+};
+
+/* ADT structure for generic function args */
+struct ipmac {
+ u32 id; /* id in array */
+ unsigned char *ether; /* ethernet address */
+};
+
+/* Member element without and with timeout */
+
+struct ipmac_elem {
+ unsigned char ether[ETH_ALEN];
+ unsigned char match;
+} __attribute__ ((aligned));
+
+struct ipmac_telem {
+ unsigned char ether[ETH_ALEN];
+ unsigned char match;
+ unsigned long timeout;
+} __attribute__ ((aligned));
+
+static inline void *
+bitmap_ipmac_elem(const struct bitmap_ipmac *map, u32 id)
+{
+ return (void *)((char *)map->members + id * map->dsize);
+}
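
An editorial sketch, not part of the patch: bitmap:ip,mac cannot use a plain bitmap because every slot carries a MAC and a state byte, so bitmap_ipmac_elem() indexes a flat byte array in dsize-sized steps. The same arithmetic in standalone form:

#include <stdio.h>
#include <stdlib.h>

static void *elem(void *members, size_t dsize, unsigned int id)
{
    return (char *)members + id * dsize;    /* id-th dsize-sized record */
}

int main(void)
{
    size_t dsize = 16;  /* e.g. a MAC, a state byte and padding */
    void *members = calloc(256, dsize);

    if (!members)
        return 1;
    printf("elem 3 at offset %td\n",
           (char *)elem(members, dsize, 3) - (char *)members); /* 48 */
    free(members);
    return 0;
}
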
+
+static inline bool
+bitmap_timeout(const struct bitmap_ipmac *map, u32 id)
+{
+ const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+ return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+bitmap_expired(const struct bitmap_ipmac *map, u32 id)
+{
+ const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+ return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+bitmap_ipmac_exist(const struct ipmac_telem *elem)
+{
+ return elem->match == MAC_UNSET ||
+ (elem->match == MAC_FILLED &&
+ !ip_set_timeout_expired(elem->timeout));
+}
+
+/* Base variant */
+
+static int
+bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ /* Trigger kernel to fill out the ethernet address */
+ return -EAGAIN;
+ case MAC_FILLED:
+ return data->ether == NULL ||
+ compare_ether_addr(data->ether, elem->ether) == 0;
+ }
+ return 0;
+}
+
+static int
+bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ if (!data->ether)
+ /* Already added without ethernet address */
+ return -IPSET_ERR_EXIST;
+ /* Fill the MAC address */
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ break;
+ case MAC_FILLED:
+ return -IPSET_ERR_EXIST;
+ case MAC_EMPTY:
+ if (data->ether) {
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ } else
+ elem->match = MAC_UNSET;
+ }
+
+ return 0;
+}
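
An editorial sketch, not part of the patch: each slot in bitmap:ip,mac is a small state machine. MAC_EMPTY means free; adding an IP without a MAC parks the slot in MAC_UNSET until a MAC arrives, either from userspace or from a matched packet; MAC_FILLED is a complete entry. A standalone mirror of bitmap_ipmac_add(), with slot_add() as the hypothetical analogue:

#include <stdio.h>
#include <string.h>

enum { MAC_EMPTY, MAC_FILLED, MAC_UNSET };

struct slot {
    unsigned char ether[6];
    unsigned char match;
};

/* mirrors bitmap_ipmac_add(); mac == NULL means "IP only" */
static int slot_add(struct slot *s, const unsigned char *mac)
{
    switch (s->match) {
    case MAC_UNSET:
        if (!mac)
            return -1;  /* already added without a MAC */
        memcpy(s->ether, mac, 6);
        s->match = MAC_FILLED;
        break;
    case MAC_FILLED:
        return -1;      /* duplicate entry */
    case MAC_EMPTY:
        if (mac) {
            memcpy(s->ether, mac, 6);
            s->match = MAC_FILLED;
        } else {
            s->match = MAC_UNSET;
        }
    }
    return 0;
}

int main(void)
{
    static const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    struct slot s = { .match = MAC_EMPTY };

    printf("%d ", slot_add(&s, NULL));  /* 0: EMPTY -> UNSET  */
    printf("%d ", slot_add(&s, mac));   /* 0: UNSET -> FILLED */
    printf("%d\n", slot_add(&s, mac));  /* -1: already FILLED */
    return 0;
}
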
+
+static int
+bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ if (elem->match == MAC_EMPTY)
+ return -IPSET_ERR_EXIST;
+
+ elem->match = MAC_EMPTY;
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac_elem *elem;
+ struct nlattr *atd, *nested;
+ u32 id, first = cb->args[2];
+ u32 last = map->last_ip - map->first_ip;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ elem = bitmap_ipmac_elem(map, id);
+ if (elem->match == MAC_EMPTY)
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id));
+ if (elem->match == MAC_FILLED)
+ NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+ elem->ether);
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ /* Trigger kernel to fill out the ethernet address */
+ return -EAGAIN;
+ case MAC_FILLED:
+ return (data->ether == NULL ||
+ compare_ether_addr(data->ether, elem->ether) == 0) &&
+ !bitmap_expired(map, data->id);
+ }
+ return 0;
+}
+
+static int
+bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+ switch (elem->match) {
+ case MAC_UNSET:
+ if (!data->ether)
+ /* Already added without ethernet address */
+ return -IPSET_ERR_EXIST;
+ /* Fill the MAC address and activate the timer */
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ if (timeout == map->timeout)
+ /* Timeout was not specified, get stored one */
+ timeout = elem->timeout;
+ elem->timeout = ip_set_timeout_set(timeout);
+ break;
+ case MAC_FILLED:
+ if (!bitmap_expired(map, data->id))
+ return -IPSET_ERR_EXIST;
+ /* Fall through */
+ case MAC_EMPTY:
+ if (data->ether) {
+ memcpy(elem->ether, data->ether, ETH_ALEN);
+ elem->match = MAC_FILLED;
+ } else
+ elem->match = MAC_UNSET;
+ /* While the MAC is still unset, we store the plain
+ * timeout value, because the countdown has not been
+ * started yet; it can be reused later, when the MAC
+ * is filled in, possibly by the kernel */
+ elem->timeout = data->ether ? ip_set_timeout_set(timeout)
+ : timeout;
+ break;
+ }
+
+ return 0;
+}
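
An editorial aside, not part of the patch: the ternary at the end of the MAC_EMPTY/MAC_UNSET branch above is the subtle bit. A filled slot stores an absolute expiry stamp, but an unset slot stores the raw timeout value, so the countdown only starts once the MAC is learned and the entry is complete. Sketched with seconds standing in for jiffies:

#include <stdio.h>
#include <time.h>

int main(void)
{
    unsigned long ttl = 600;    /* kept as-is while the MAC is unset */
    unsigned long now = (unsigned long)time(NULL);

    /* ...the kernel later fills in the MAC; only then arm the clock */
    unsigned long stamp = now + ttl;

    printf("raw ttl %lu, armed stamp %lu\n", ttl, stamp);
    return 0;
}
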
+
+static int
+bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_ipmac *map = set->data;
+ const struct ipmac *data = value;
+ struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+ if (elem->match == MAC_EMPTY || bitmap_expired(map, data->id))
+ return -IPSET_ERR_EXIST;
+
+ elem->match = MAC_EMPTY;
+
+ return 0;
+}
+
+static int
+bitmap_ipmac_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_ipmac *map = set->data;
+ const struct ipmac_telem *elem;
+ struct nlattr *atd, *nested;
+ u32 id, first = cb->args[2];
+ u32 timeout, last = map->last_ip - map->first_ip;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ elem = bitmap_ipmac_elem(map, id);
+ if (!bitmap_ipmac_exist(elem))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+ htonl(map->first_ip + id));
+ if (elem->match == MAC_FILLED)
+ NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+ elem->ether);
+ timeout = elem->match == MAC_UNSET ? elem->timeout
+ : ip_set_timeout_get(elem->timeout);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct bitmap_ipmac *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct ipmac data;
+
+ data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+ if (data.id < map->first_ip || data.id > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ /* Backward compatibility: we don't check the second flag */
+ if (skb_mac_header(skb) < skb->head ||
+ (skb_mac_header(skb) + ETH_HLEN) > skb->data)
+ return -EINVAL;
+
+ data.id -= map->first_ip;
+ data.ether = eth_hdr(skb)->h_source;
+
+ return adtfn(set, &data, map->timeout);
+}
+
+static int
+bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct bitmap_ipmac *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct ipmac data;
+ u32 timeout = map->timeout;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &data.id);
+ if (ret)
+ return ret;
+
+ if (data.id < map->first_ip || data.id > map->last_ip)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ if (tb[IPSET_ATTR_ETHER])
+ data.ether = nla_data(tb[IPSET_ATTR_ETHER]);
+ else
+ data.ether = NULL;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(map->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ data.id -= map->first_ip;
+
+ ret = adtfn(set, &data, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+bitmap_ipmac_destroy(struct ip_set *set)
+{
+ struct bitmap_ipmac *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+
+ ip_set_free(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void
+bitmap_ipmac_flush(struct ip_set *set)
+{
+ struct bitmap_ipmac *map = set->data;
+
+ memset(map->members, 0,
+ (map->last_ip - map->first_ip + 1) * map->dsize);
+}
+
+static int
+bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct bitmap_ipmac *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map)
+ + (map->last_ip - map->first_ip + 1) * map->dsize));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static bool
+bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct bitmap_ipmac *x = a->data;
+ const struct bitmap_ipmac *y = b->data;
+
+ return x->first_ip == y->first_ip &&
+ x->last_ip == y->last_ip &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ipmac = {
+ .kadt = bitmap_ipmac_kadt,
+ .uadt = bitmap_ipmac_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ipmac_add,
+ [IPSET_DEL] = bitmap_ipmac_del,
+ [IPSET_TEST] = bitmap_ipmac_test,
+ },
+ .destroy = bitmap_ipmac_destroy,
+ .flush = bitmap_ipmac_flush,
+ .head = bitmap_ipmac_head,
+ .list = bitmap_ipmac_list,
+ .same_set = bitmap_ipmac_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tipmac = {
+ .kadt = bitmap_ipmac_kadt,
+ .uadt = bitmap_ipmac_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_ipmac_tadd,
+ [IPSET_DEL] = bitmap_ipmac_tdel,
+ [IPSET_TEST] = bitmap_ipmac_ttest,
+ },
+ .destroy = bitmap_ipmac_destroy,
+ .flush = bitmap_ipmac_flush,
+ .head = bitmap_ipmac_head,
+ .list = bitmap_ipmac_tlist,
+ .same_set = bitmap_ipmac_same_set,
+};
+
+static void
+bitmap_ipmac_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct bitmap_ipmac *map = set->data;
+ struct ipmac_telem *elem;
+ u32 id, last = map->last_ip - map->first_ip;
+
+ /* We run in parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ for (id = 0; id <= last; id++) {
+ elem = bitmap_ipmac_elem(map, id);
+ if (elem->match == MAC_FILLED &&
+ ip_set_timeout_expired(elem->timeout))
+ elem->match = MAC_EMPTY;
+ }
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+bitmap_ipmac_gc_init(struct ip_set *set)
+{
+ struct bitmap_ipmac *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = bitmap_ipmac_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create bitmap:ip,mac type of sets */
+
+static bool
+init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
+ u32 first_ip, u32 last_ip)
+{
+ map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+ if (!map->members)
+ return false;
+ map->first_ip = first_ip;
+ map->last_ip = last_ip;
+ map->timeout = IPSET_NO_TIMEOUT;
+
+ set->data = map;
+ set->family = AF_INET;
+
+ return true;
+}
+
+static int
+bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
+{
+ u32 first_ip, last_ip, elements;
+ struct bitmap_ipmac *map;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+ if (ret)
+ return ret;
+ if (first_ip > last_ip) {
+ u32 tmp = first_ip;
+
+ first_ip = last_ip;
+ last_ip = tmp;
+ }
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr >= 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ last_ip = first_ip | ~ip_set_hostmask(cidr);
+ } else
+ return -IPSET_ERR_PROTOCOL;
+
+ elements = last_ip - first_ip + 1;
+
+ if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+ return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ map->dsize = sizeof(struct ipmac_telem);
+
+ if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = &bitmap_tipmac;
+
+ bitmap_ipmac_gc_init(set);
+ } else {
+ map->dsize = sizeof(struct ipmac_elem);
+
+ if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+ set->variant = &bitmap_ipmac;
+ }
+ return 0;
+}
+
+static struct ip_set_type bitmap_ipmac_type __read_mostly = {
+ .name = "bitmap:ip,mac",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
+ .dimension = IPSET_DIM_TWO,
+ .family = AF_INET,
+ .revision = 0,
+ .create = bitmap_ipmac_create,
+ .create_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_ETHER] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+bitmap_ipmac_init(void)
+{
+ return ip_set_type_register(&bitmap_ipmac_type);
+}
+
+static void __exit
+bitmap_ipmac_fini(void)
+{
+ ip_set_type_unregister(&bitmap_ipmac_type);
+}
+
+module_init(bitmap_ipmac_init);
+module_exit(bitmap_ipmac_fini);
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
new file mode 100644
index 0000000..165f09b
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -0,0 +1,515 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:port type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:port type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:port");
+
+/* Type structure */
+struct bitmap_port {
+ void *members; /* the set members */
+ u16 first_port; /* host byte order, included in range */
+ u16 last_port; /* host byte order, included in range */
+ size_t memsize; /* members size */
+ u32 timeout; /* timeout parameter */
+ struct timer_list gc; /* garbage collection */
+};
+
+/* Base variant */
+
+static int
+bitmap_port_test(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_port *map = set->data;
+ u16 id = *(u16 *)value;
+
+ return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_port_add(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (test_and_set_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_port_del(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ u16 id = *(u16 *)value;
+
+ if (!test_and_clear_bit(id, map->members))
+ return -IPSET_ERR_EXIST;
+
+ return 0;
+}
+
+static int
+bitmap_port_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_port *map = set->data;
+ struct nlattr *atd, *nested;
+ u16 id, first = cb->args[2];
+ u16 last = map->last_port - map->first_port;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ if (!test_bit(id, map->members))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+ htons(map->first_port + id));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+ const struct bitmap_port *map = set->data;
+ const unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+
+ if (ip_set_timeout_test(members[id]))
+ return -IPSET_ERR_EXIST;
+
+ members[id] = ip_set_timeout_set(timeout);
+
+ return 0;
+}
+
+static int
+bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+ struct bitmap_port *map = set->data;
+ unsigned long *members = map->members;
+ u16 id = *(u16 *)value;
+ int ret = -IPSET_ERR_EXIST;
+
+ if (ip_set_timeout_test(members[id]))
+ ret = 0;
+
+ members[id] = IPSET_ELEM_UNSET;
+ return ret;
+}
+
+static int
+bitmap_port_tlist(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct bitmap_port *map = set->data;
+ struct nlattr *adt, *nested;
+ u16 id, first = cb->args[2];
+ u16 last = map->last_port - map->first_port;
+ const unsigned long *members = map->members;
+
+ adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!adt)
+ return -EMSGSIZE;
+ for (; cb->args[2] <= last; cb->args[2]++) {
+ id = cb->args[2];
+ if (!ip_set_timeout_test(members[id]))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (id == first) {
+ nla_nest_cancel(skb, adt);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+ htons(map->first_port + id));
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(members[id])));
+ ipset_nest_end(skb, nested);
+ }
+ ipset_nest_end(skb, adt);
+
+ /* Set listing finished */
+ cb->args[2] = 0;
+
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, adt);
+ if (unlikely(id == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
+static int
+bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct bitmap_port *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ __be16 __port;
+ u16 port = 0;
+
+ if (!ip_set_get_ip_port(skb, pf, flags & IPSET_DIM_ONE_SRC, &__port))
+ return -EINVAL;
+
+ port = ntohs(__port);
+
+ if (port < map->first_port || port > map->last_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ port -= map->first_port;
+
+ return adtfn(set, &port, map->timeout);
+}
+
+static int
+bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ struct bitmap_port *map = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 timeout = map->timeout;
+ u32 port; /* wraparound */
+ u16 id, port_to;
+ int ret = 0;
+
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+ if (port < map->first_port || port > map->last_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(map->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST) {
+ id = port - map->first_port;
+ return adtfn(set, &id, timeout);
+ }
+
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to) {
+ swap(port, port_to);
+ if (port < map->first_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+ }
+ } else
+ port_to = port;
+
+ if (port_to > map->last_port)
+ return -IPSET_ERR_BITMAP_RANGE;
+
+ for (; port <= port_to; port++) {
+ id = port - map->first_port;
+ ret = adtfn(set, &id, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
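
An editorial aside, not part of the patch: the "wraparound" comment on the u32 port variable above is load-bearing. If the iterator were u16, a range ending at 65535 would wrap to 0 after the last increment and "port <= port_to" would never become false. Widening to u32 lets port reach 65536 and stop. A standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t port, port_to = 65535;
    unsigned int count = 0;

    for (port = 65530; port <= port_to; port++)
        count++;    /* port reaches 65536 and the loop stops */
    printf("%u ports\n", count);    /* 6; a u16 iterator would never stop */
    return 0;
}
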
+
+static void
+bitmap_port_destroy(struct ip_set *set)
+{
+ struct bitmap_port *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+
+ ip_set_free(map->members);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static void
+bitmap_port_flush(struct ip_set *set)
+{
+ struct bitmap_port *map = set->data;
+
+ memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct bitmap_port *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->memsize));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static bool
+bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct bitmap_port *x = a->data;
+ const struct bitmap_port *y = b->data;
+
+ return x->first_port == y->first_port &&
+ x->last_port == y->last_port &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_port = {
+ .kadt = bitmap_port_kadt,
+ .uadt = bitmap_port_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_port_add,
+ [IPSET_DEL] = bitmap_port_del,
+ [IPSET_TEST] = bitmap_port_test,
+ },
+ .destroy = bitmap_port_destroy,
+ .flush = bitmap_port_flush,
+ .head = bitmap_port_head,
+ .list = bitmap_port_list,
+ .same_set = bitmap_port_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tport = {
+ .kadt = bitmap_port_kadt,
+ .uadt = bitmap_port_uadt,
+ .adt = {
+ [IPSET_ADD] = bitmap_port_tadd,
+ [IPSET_DEL] = bitmap_port_tdel,
+ [IPSET_TEST] = bitmap_port_ttest,
+ },
+ .destroy = bitmap_port_destroy,
+ .flush = bitmap_port_flush,
+ .head = bitmap_port_head,
+ .list = bitmap_port_tlist,
+ .same_set = bitmap_port_same_set,
+};
+
+static void
+bitmap_port_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct bitmap_port *map = set->data;
+ unsigned long *table = map->members;
+ u32 id; /* wraparound */
+ u16 last = map->last_port - map->first_port;
+
+ /* We run in parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ for (id = 0; id <= last; id++)
+ if (ip_set_timeout_expired(table[id]))
+ table[id] = IPSET_ELEM_UNSET;
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+bitmap_port_gc_init(struct ip_set *set)
+{
+ struct bitmap_port *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = bitmap_port_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+/* Create bitmap:port type of sets */
+
+static bool
+init_map_port(struct ip_set *set, struct bitmap_port *map,
+ u16 first_port, u16 last_port)
+{
+ map->members = ip_set_alloc(map->memsize);
+ if (!map->members)
+ return false;
+ map->first_port = first_port;
+ map->last_port = last_port;
+ map->timeout = IPSET_NO_TIMEOUT;
+
+ set->data = map;
+ set->family = AF_UNSPEC;
+
+ return true;
+}
+
+static int
+bitmap_port_create(struct ip_set *set, struct nlattr *tb[],
+ u32 flags)
+{
+ struct bitmap_port *map;
+ u16 first_port, last_port;
+
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+ last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (first_port > last_port) {
+ u16 tmp = first_port;
+
+ first_port = last_port;
+ last_port = tmp;
+ }
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ map->memsize = (last_port - first_port + 1)
+ * sizeof(unsigned long);
+
+ if (!init_map_port(set, map, first_port, last_port)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ set->variant = &bitmap_tport;
+
+ bitmap_port_gc_init(set);
+ } else {
+ map->memsize = bitmap_bytes(0, last_port - first_port);
+ pr_debug("memsize: %zu\n", map->memsize);
+ if (!init_map_port(set, map, first_port, last_port)) {
+ kfree(map);
+ return -ENOMEM;
+ }
+
+ set->variant = &bitmap_port;
+ }
+ return 0;
+}
+
+static struct ip_set_type bitmap_port_type __read_mostly = {
+ .name = "bitmap:port",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_PORT,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = bitmap_port_create,
+ .create_policy = {
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+bitmap_port_init(void)
+{
+ return ip_set_type_register(&bitmap_port_type);
+}
+
+static void __exit
+bitmap_port_fini(void)
+{
+ ip_set_type_unregister(&bitmap_port_type);
+}
+
+module_init(bitmap_port_init);
+module_exit(bitmap_port_fini);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
new file mode 100644
index 0000000..8b1a54c
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -0,0 +1,1671 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/version.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+static LIST_HEAD(ip_set_type_list); /* all registered set types */
+static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
+
+static struct ip_set **ip_set_list; /* all individual sets */
+static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+
+#define STREQ(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
+
+static unsigned int max_sets;
+
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("core IP set support");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+
+/*
+ * The set types are implemented in modules and registered set types
+ * can be found in ip_set_type_list. Adding/deleting types is
+ * serialized by ip_set_type_mutex.
+ */
+
+static inline void
+ip_set_type_lock(void)
+{
+ mutex_lock(&ip_set_type_mutex);
+}
+
+static inline void
+ip_set_type_unlock(void)
+{
+ mutex_unlock(&ip_set_type_mutex);
+}
+
+/* Register and deregister settype */
+
+static struct ip_set_type *
+find_set_type(const char *name, u8 family, u8 revision)
+{
+ struct ip_set_type *type;
+
+ list_for_each_entry_rcu(type, &ip_set_type_list, list)
+ if (STREQ(type->name, name) &&
+ (type->family == family || type->family == AF_UNSPEC) &&
+ type->revision == revision)
+ return type;
+ return NULL;
+}
+
+/* Unlock, try to load a set type module and lock again */
+static int
+try_to_load_type(const char *name)
+{
+ nfnl_unlock();
+ pr_debug("try to load ip_set_%s\n", name);
+ if (request_module("ip_set_%s", name) < 0) {
+ pr_warning("Can't find ip_set type %s\n", name);
+ nfnl_lock();
+ return -IPSET_ERR_FIND_TYPE;
+ }
+ nfnl_lock();
+ return -EAGAIN;
+}
+
+/* Find a set type and reference it */
+static int
+find_set_type_get(const char *name, u8 family, u8 revision,
+ struct ip_set_type **found)
+{
+ rcu_read_lock();
+ *found = find_set_type(name, family, revision);
+ if (*found) {
+ int err = !try_module_get((*found)->me);
+ rcu_read_unlock();
+ return err ? -EFAULT : 0;
+ }
+ rcu_read_unlock();
+
+ return try_to_load_type(name);
+}
+
+/* Find a given set type by name and family.
+ * On success, the minimum and maximum supported revisions are
+ * filled out.
+ */
+static int
+find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
+{
+ struct ip_set_type *type;
+ bool found = false;
+
+ *min = 255; /* start high so any match can lower it */
+ *max = 0;
+ rcu_read_lock();
+ list_for_each_entry_rcu(type, &ip_set_type_list, list)
+ if (STREQ(type->name, name) &&
+ (type->family == family || type->family == AF_UNSPEC)) {
+ found = true;
+ if (type->revision < *min)
+ *min = type->revision;
+ if (type->revision > *max)
+ *max = type->revision;
+ }
+ rcu_read_unlock();
+ if (found)
+ return 0;
+
+ return try_to_load_type(name);
+}
+
+#define family_name(f) ((f) == AF_INET ? "inet" : \
+ (f) == AF_INET6 ? "inet6" : "any")
+
+/* Register a set type structure. The type is identified by
+ * the unique triple of name, family and revision.
+ */
+int
+ip_set_type_register(struct ip_set_type *type)
+{
+ int ret = 0;
+
+ if (type->protocol != IPSET_PROTOCOL) {
+ pr_warning("ip_set type %s, family %s, revision %u uses "
+ "wrong protocol version %u (want %u)\n",
+ type->name, family_name(type->family),
+ type->revision, type->protocol, IPSET_PROTOCOL);
+ return -EINVAL;
+ }
+
+ ip_set_type_lock();
+ if (find_set_type(type->name, type->family, type->revision)) {
+ /* Duplicate! */
+ pr_warning("ip_set type %s, family %s, revision %u "
+ "already registered!\n", type->name,
+ family_name(type->family), type->revision);
+ ret = -EINVAL;
+ goto unlock;
+ }
+ list_add_rcu(&type->list, &ip_set_type_list);
+ pr_debug("type %s, family %s, revision %u registered.\n",
+ type->name, family_name(type->family), type->revision);
+unlock:
+ ip_set_type_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_type_register);
+
+/* Unregister a set type. There's a small race with ip_set_create */
+void
+ip_set_type_unregister(struct ip_set_type *type)
+{
+ ip_set_type_lock();
+ if (!find_set_type(type->name, type->family, type->revision)) {
+ pr_warning("ip_set type %s, family %s, revision %u "
+ "not registered\n", type->name,
+ family_name(type->family), type->revision);
+ goto unlock;
+ }
+ list_del_rcu(&type->list);
+ pr_debug("type %s, family %s, revision %u unregistered.\n",
+ type->name, family_name(type->family), type->revision);
+unlock:
+ ip_set_type_unlock();
+
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(ip_set_type_unregister);
+
+/* Utility functions */
+void *
+ip_set_alloc(size_t size)
+{
+ void *members = NULL;
+
+ if (size < KMALLOC_MAX_SIZE)
+ members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ if (members) {
+ pr_debug("%p: allocated with kmalloc\n", members);
+ return members;
+ }
+
+ members = vzalloc(size);
+ if (!members)
+ return NULL;
+ pr_debug("%p: allocated with vmalloc\n", members);
+
+ return members;
+}
+EXPORT_SYMBOL_GPL(ip_set_alloc);
+
+void
+ip_set_free(void *members)
+{
+ pr_debug("%p: free with %s\n", members,
+ is_vmalloc_addr(members) ? "vfree" : "kfree");
+ if (is_vmalloc_addr(members))
+ vfree(members);
+ else
+ kfree(members);
+}
+EXPORT_SYMBOL_GPL(ip_set_free);
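
An editorial sketch, not part of the patch: ip_set_alloc() first tries kzalloc for sizes below KMALLOC_MAX_SIZE, with __GFP_NOWARN so a failure stays silent, then falls back to vzalloc; ip_set_free() later picks kfree or vfree by inspecting the address. The decision logic, reduced to standalone C with a mocked size limit and alloc_path() as a hypothetical helper:

#include <stddef.h>
#include <stdio.h>

#define KMALLOC_MAX_SIZE (4UL << 20)    /* mock; the real value is arch-specific */

static const char *alloc_path(size_t size, int kmalloc_failed)
{
    if (size < KMALLOC_MAX_SIZE && !kmalloc_failed)
        return "kzalloc";   /* small: physically contiguous */
    return "vzalloc";       /* large or fragmented: virtual mapping */
}

int main(void)
{
    printf("%s\n", alloc_path(4096, 0));        /* kzalloc */
    printf("%s\n", alloc_path(16UL << 20, 0));  /* vzalloc */
    printf("%s\n", alloc_path(4096, 1));        /* vzalloc fallback */
    return 0;
}
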
+
+static inline bool
+flag_nested(const struct nlattr *nla)
+{
+ return nla->nla_type & NLA_F_NESTED;
+}
+
+static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
+ [IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 },
+ [IPSET_ATTR_IPADDR_IPV6] = { .type = NLA_BINARY,
+ .len = sizeof(struct in6_addr) },
+};
+
+int
+ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
+{
+ struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+ if (unlikely(!flag_nested(nla)))
+ return -IPSET_ERR_PROTOCOL;
+ if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+ return -IPSET_ERR_PROTOCOL;
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
+ return -IPSET_ERR_PROTOCOL;
+
+ *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
+
+int
+ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
+{
+ struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+ if (unlikely(!flag_nested(nla)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+ return -IPSET_ERR_PROTOCOL;
+ if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
+ return -IPSET_ERR_PROTOCOL;
+
+ memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
+ sizeof(struct in6_addr));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
+
+/*
+ * Creating/destroying/renaming/swapping affect the existence and
+ * the properties of a set. All of these can be executed from
+ * userspace only and are serialized by the nfnl mutex, indirectly
+ * via nfnetlink.
+ *
+ * Sets are identified by their index in ip_set_list and the index
+ * is used by the external references (set/SET netfilter modules).
+ *
+ * The set behind an index may change by swapping only, from userspace.
+ */
+
+static inline void
+__ip_set_get(ip_set_id_t index)
+{
+ atomic_inc(&ip_set_list[index]->ref);
+}
+
+static inline void
+__ip_set_put(ip_set_id_t index)
+{
+ atomic_dec(&ip_set_list[index]->ref);
+}
+
+/*
+ * Add, del and test set entries from kernel.
+ *
+ * The set behind the index must exist and must be referenced
+ * so it can't be destroyed (or changed) under our foot.
+ */
+
+int
+ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags)
+{
+ struct ip_set *set = ip_set_list[index];
+ int ret = 0;
+
+ BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ pr_debug("set %s, index %u\n", set->name, index);
+
+ if (dim < set->type->dimension ||
+ !(family == set->family || set->family == AF_UNSPEC))
+ return 0;
+
+ read_lock_bh(&set->lock);
+ ret = set->variant->kadt(set, skb, IPSET_TEST, family, dim, flags);
+ read_unlock_bh(&set->lock);
+
+ if (ret == -EAGAIN) {
+ /* Type requests element to be completed */
+ pr_debug("element must be competed, ADD is triggered\n");
+ write_lock_bh(&set->lock);
+ set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+ write_unlock_bh(&set->lock);
+ ret = 1;
+ }
+
+ /* Convert error codes to nomatch */
+ return (ret < 0 ? 0 : ret);
+}
+EXPORT_SYMBOL_GPL(ip_set_test);
+
+int
+ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags)
+{
+ struct ip_set *set = ip_set_list[index];
+ int ret;
+
+ BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ pr_debug("set %s, index %u\n", set->name, index);
+
+ if (dim < set->type->dimension ||
+ !(family == set->family || set->family == AF_UNSPEC))
+ return 0;
+
+ write_lock_bh(&set->lock);
+ ret = set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+ write_unlock_bh(&set->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_add);
+
+int
+ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
+ u8 family, u8 dim, u8 flags)
+{
+ struct ip_set *set = ip_set_list[index];
+ int ret = 0;
+
+ BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+ pr_debug("set %s, index %u\n", set->name, index);
+
+ if (dim < set->type->dimension ||
+ !(family == set->family || set->family == AF_UNSPEC))
+ return 0;
+
+ write_lock_bh(&set->lock);
+ ret = set->variant->kadt(set, skb, IPSET_DEL, family, dim, flags);
+ write_unlock_bh(&set->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_del);
+
+/*
+ * Find set by name and reference it once. The reference makes sure
+ * the thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex must already be held.
+ */
+ip_set_id_t
+ip_set_get_byname(const char *name, struct ip_set **set)
+{
+ ip_set_id_t i, index = IPSET_INVALID_ID;
+ struct ip_set *s;
+
+ for (i = 0; i < ip_set_max; i++) {
+ s = ip_set_list[i];
+ if (s != NULL && STREQ(s->name, name)) {
+ __ip_set_get(i);
+ index = i;
+ *set = s;
+ }
+ }
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_byname);
+
+/*
+ * If the given index points to a valid set, decrement its
+ * reference count by 1. The caller must not assume the index
+ * is still valid after calling this function.
+ *
+ * The nfnl mutex must already be held.
+ */
+void
+ip_set_put_byindex(ip_set_id_t index)
+{
+ if (ip_set_list[index] != NULL) {
+ BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+ __ip_set_put(index);
+ }
+}
+EXPORT_SYMBOL_GPL(ip_set_put_byindex);
+
+/*
+ * Get the name of a set behind a set index.
+ * We assume the set is referenced, so it exists and cannot be
+ * destroyed; being referenced, it cannot be renamed either.
+ *
+ * The nfnl mutex must already be held.
+ */
+const char *
+ip_set_name_byindex(ip_set_id_t index)
+{
+ const struct ip_set *set = ip_set_list[index];
+
+ BUG_ON(set == NULL);
+ BUG_ON(atomic_read(&set->ref) == 0);
+
+ /* Referenced, so it's safe */
+ return set->name;
+}
+EXPORT_SYMBOL_GPL(ip_set_name_byindex);
+
+/*
+ * Routines called by external subsystems, which do not
+ * take the nfnl mutex for us.
+ */
+
+/*
+ * Find set by name and reference it once. The reference makes sure
+ * the thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is taken inside the function.
+ */
+ip_set_id_t
+ip_set_nfnl_get(const char *name)
+{
+ struct ip_set *s;
+ ip_set_id_t index;
+
+ nfnl_lock();
+ index = ip_set_get_byname(name, &s);
+ nfnl_unlock();
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
+
+/*
+ * Find set by index and reference it once. The reference makes sure
+ * the thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is taken inside the function.
+ */
+ip_set_id_t
+ip_set_nfnl_get_byindex(ip_set_id_t index)
+{
+ if (index >= ip_set_max)
+ return IPSET_INVALID_ID;
+
+ nfnl_lock();
+ if (ip_set_list[index])
+ __ip_set_get(index);
+ else
+ index = IPSET_INVALID_ID;
+ nfnl_unlock();
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
+
+/*
+ * If the given index points to a valid set, decrement its
+ * reference count by 1. The caller must not assume the index
+ * is still valid after calling this function.
+ *
+ * The nfnl mutex is taken inside the function.
+ */
+void
+ip_set_nfnl_put(ip_set_id_t index)
+{
+ nfnl_lock();
+ if (ip_set_list[index] != NULL) {
+ BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+ __ip_set_put(index);
+ }
+ nfnl_unlock();
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
+
+/*
+ * Communication protocol with userspace over netlink.
+ *
+ * We are already serialized by the nfnl mutex.
+ */
+
+static inline bool
+protocol_failed(const struct nlattr * const tb[])
+{
+ return !tb[IPSET_ATTR_PROTOCOL] ||
+ nla_get_u8(tb[IPSET_ATTR_PROTOCOL]) != IPSET_PROTOCOL;
+}
+
+static inline u32
+flag_exist(const struct nlmsghdr *nlh)
+{
+ return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
+}
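
An editorial aside, not part of the patch: flag_exist() inverts the netlink NLM_F_EXCL flag. With EXCL set, a clash must fail; with EXCL clear (what the userspace tool's -exist option produces), IPSET_FLAG_EXIST is passed down and the clash is tolerated. A standalone check of the mapping; the IPSET_FLAG_EXIST value is mocked:

#include <stdio.h>

#define NLM_F_EXCL       0x200  /* value from <linux/netlink.h> */
#define IPSET_FLAG_EXIST 1      /* mock of the ipset flag bit */

static unsigned int flag_exist(unsigned int nlmsg_flags)
{
    return nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
}

int main(void)
{
    /* EXCL set: clash is an error; EXCL clear: clash is tolerated */
    printf("%u %u\n", flag_exist(NLM_F_EXCL), flag_exist(0)); /* 0 1 */
    return 0;
}
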
+
+static struct nlmsghdr *
+start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
+ enum ipset_cmd cmd)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+
+ nlh = nlmsg_put(skb, pid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
+ sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ return NULL;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_INET;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ return nlh;
+}
+
+/* Create a set */
+
+static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1},
+ [IPSET_ATTR_REVISION] = { .type = NLA_U8 },
+ [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
+ [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
+};
+
+static ip_set_id_t
+find_set_id(const char *name)
+{
+ ip_set_id_t i, index = IPSET_INVALID_ID;
+ const struct ip_set *set;
+
+ for (i = 0; index == IPSET_INVALID_ID && i < ip_set_max; i++) {
+ set = ip_set_list[i];
+ if (set != NULL && STREQ(set->name, name))
+ index = i;
+ }
+ return index;
+}
+
+static inline struct ip_set *
+find_set(const char *name)
+{
+ ip_set_id_t index = find_set_id(name);
+
+ return index == IPSET_INVALID_ID ? NULL : ip_set_list[index];
+}
+
+static int
+find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+{
+ ip_set_id_t i;
+
+ *index = IPSET_INVALID_ID;
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL) {
+ if (*index == IPSET_INVALID_ID)
+ *index = i;
+ } else if (STREQ(name, ip_set_list[i]->name)) {
+ /* Name clash */
+ *set = ip_set_list[i];
+ return -EEXIST;
+ }
+ }
+ if (*index == IPSET_INVALID_ID)
+ /* No free slot remained */
+ return -IPSET_ERR_MAX_SETS;
+ return 0;
+}
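
An editorial sketch, not part of the patch: find_free_id() does two jobs in one pass over ip_set_list: it remembers the first empty slot while scanning to the end so a later name clash is still caught. The same scan in standalone C:

#include <stdio.h>
#include <string.h>

#define INVALID_ID (~0u)

static int find_free_id(const char *list[], unsigned int max,
                        const char *name, unsigned int *index)
{
    unsigned int i;

    *index = INVALID_ID;
    for (i = 0; i < max; i++) {
        if (!list[i]) {
            if (*index == INVALID_ID)
                *index = i;     /* remember the first hole */
        } else if (!strcmp(list[i], name)) {
            return -1;          /* name clash */
        }
    }
    return *index == INVALID_ID ? -2 : 0;   /* -2: no free slot left */
}

int main(void)
{
    const char *list[4] = { "a", NULL, "b", NULL };
    unsigned int idx;

    printf("%d %u\n", find_free_id(list, 4, "c", &idx), idx);  /* 0 1 */
    printf("%d\n", find_free_id(list, 4, "b", &idx));          /* -1 */
    return 0;
}
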
+
+static int
+ip_set_create(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set, *clash;
+ ip_set_id_t index = IPSET_INVALID_ID;
+ struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
+ const char *name, *typename;
+ u8 family, revision;
+ u32 flags = flag_exist(nlh);
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_TYPENAME] == NULL ||
+ attr[IPSET_ATTR_REVISION] == NULL ||
+ attr[IPSET_ATTR_FAMILY] == NULL ||
+ (attr[IPSET_ATTR_DATA] != NULL &&
+ !flag_nested(attr[IPSET_ATTR_DATA]))))
+ return -IPSET_ERR_PROTOCOL;
+
+ name = nla_data(attr[IPSET_ATTR_SETNAME]);
+ typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+ family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+ revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
+ pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
+ name, typename, family_name(family), revision);
+
+ /*
+ * First, and without any locks, allocate and initialize
+ * a normal base set structure.
+ */
+ set = kzalloc(sizeof(struct ip_set), GFP_KERNEL);
+ if (!set)
+ return -ENOMEM;
+ rwlock_init(&set->lock);
+ strlcpy(set->name, name, IPSET_MAXNAMELEN);
+ atomic_set(&set->ref, 0);
+ set->family = family;
+
+ /*
+ * Next, check that we know the type, and take
+ * a reference on the type, to make sure it stays available
+ * while constructing our new set.
+ *
+ * After referencing the type, we try to create the type
+ * specific part of the set without holding any locks.
+ */
+ ret = find_set_type_get(typename, family, revision, &(set->type));
+ if (ret)
+ goto out;
+
+ /*
+ * Without holding any locks, create private part.
+ */
+ if (attr[IPSET_ATTR_DATA] &&
+ nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
+ set->type->create_policy)) {
+ ret = -IPSET_ERR_PROTOCOL;
+ goto put_out;
+ }
+
+ ret = set->type->create(set, tb, flags);
+ if (ret != 0)
+ goto put_out;
+
+ /* BTW, ret==0 here. */
+
+ /*
+ * Here, we have a valid, constructed set and we are protected
+ * by nfnl_lock. Find the first free index in ip_set_list and
+ * check clashing.
+ */
+ ret = find_free_id(set->name, &index, &clash);
+ if (ret != 0) {
+ /* If this is the same set and requested, ignore error */
+ if (ret == -EEXIST &&
+ (flags & IPSET_FLAG_EXIST) &&
+ STREQ(set->type->name, clash->type->name) &&
+ set->type->family == clash->type->family &&
+ set->type->revision == clash->type->revision &&
+ set->variant->same_set(set, clash))
+ ret = 0;
+ goto cleanup;
+ }
+
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
+ pr_debug("create: '%s' created with index %u!\n", set->name, index);
+ ip_set_list[index] = set;
+
+ return ret;
+
+cleanup:
+ set->variant->destroy(set);
+put_out:
+ module_put(set->type->me);
+out:
+ kfree(set);
+ return ret;
+}
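+
+/* A create request as sent by userspace (sketch; the actual encoding
+ * is produced by the ipset tool/libipset):
+ *
+ *	IPSET_ATTR_PROTOCOL = IPSET_PROTOCOL
+ *	IPSET_ATTR_SETNAME  = "foo"
+ *	IPSET_ATTR_TYPENAME = "hash:ip"
+ *	IPSET_ATTR_REVISION = 0
+ *	IPSET_ATTR_FAMILY   = AF_INET
+ *	IPSET_ATTR_DATA (nested)
+ *		IPSET_ATTR_HASHSIZE = 1024
+ *
+ * roughly what "ipset create foo hash:ip hashsize 1024" generates.
+ */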
+
+/* Destroy sets */
+
+static const struct nla_policy
+ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static void
+ip_set_destroy_set(ip_set_id_t index)
+{
+ struct ip_set *set = ip_set_list[index];
+
+ pr_debug("set: %s\n", set->name);
+ ip_set_list[index] = NULL;
+
+ /* Must call it without holding any lock */
+ set->variant->destroy(set);
+ module_put(set->type->me);
+ kfree(set);
+}
+
+static int
+ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ ip_set_id_t i;
+
+ if (unlikely(protocol_failed(attr)))
+ return -IPSET_ERR_PROTOCOL;
+
+ /* References are protected by the nfnl mutex */
+ if (!attr[IPSET_ATTR_SETNAME]) {
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL &&
+ (atomic_read(&ip_set_list[i]->ref)))
+ return -IPSET_ERR_BUSY;
+ }
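+ /* Second pass: nothing is referenced, destroy all sets */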
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL)
+ ip_set_destroy_set(i);
+ }
+ } else {
+ i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (i == IPSET_INVALID_ID)
+ return -ENOENT;
+ else if (atomic_read(&ip_set_list[i]->ref))
+ return -IPSET_ERR_BUSY;
+
+ ip_set_destroy_set(i);
+ }
+ return 0;
+}
+
+/* Flush sets */
+
+static void
+ip_set_flush_set(struct ip_set *set)
+{
+ pr_debug("set: %s\n", set->name);
+
+ write_lock_bh(&set->lock);
+ set->variant->flush(set);
+ write_unlock_bh(&set->lock);
+}
+
+static int
+ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ ip_set_id_t i;
+
+ if (unlikely(protocol_failed(attr)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (!attr[IPSET_ATTR_SETNAME]) {
+ for (i = 0; i < ip_set_max; i++)
+ if (ip_set_list[i] != NULL)
+ ip_set_flush_set(ip_set_list[i]);
+ } else {
+ i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (i == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ ip_set_flush_set(ip_set_list[i]);
+ }
+
+ return 0;
+}
+
+/* Rename a set */
+
+static const struct nla_policy
+ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_SETNAME2] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static int
+ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ const char *name2;
+ ip_set_id_t i;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_SETNAME2] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+ if (atomic_read(&set->ref) != 0)
+ return -IPSET_ERR_REFERENCED;
+
+ name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL &&
+ STREQ(ip_set_list[i]->name, name2))
+ return -IPSET_ERR_EXIST_SETNAME2;
+ }
+ strncpy(set->name, name2, IPSET_MAXNAMELEN);
+
+ return 0;
+}
+
+/* Swap two sets so that name/index points to the other.
+ * References and set names are also swapped.
+ *
+ * We are protected by the nfnl mutex and references are
+ * manipulated only by holding the mutex. The kernel interfaces
+ * do not hold the mutex but the pointer settings are atomic
+ * so the ip_set_list always contains valid pointers to the sets.
+ */
+
+static int
+ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *from, *to;
+ ip_set_id_t from_id, to_id;
+ char from_name[IPSET_MAXNAMELEN];
+ u32 from_ref;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_SETNAME2] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ from_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (from_id == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ to_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME2]));
+ if (to_id == IPSET_INVALID_ID)
+ return -IPSET_ERR_EXIST_SETNAME2;
+
+ from = ip_set_list[from_id];
+ to = ip_set_list[to_id];
+
+ /* Features must not change.
+ * Not an artificial restriction anymore, as we must prevent
+ * possible loops created by swapping in setlist type of sets. */
+ if (!(from->type->features == to->type->features &&
+ from->type->family == to->type->family))
+ return -IPSET_ERR_TYPE_MISMATCH;
+
+ /* No magic here: ref munging protected by the nfnl_lock */
+ strncpy(from_name, from->name, IPSET_MAXNAMELEN);
+ from_ref = atomic_read(&from->ref);
+
+ strncpy(from->name, to->name, IPSET_MAXNAMELEN);
+ atomic_set(&from->ref, atomic_read(&to->ref));
+ strncpy(to->name, from_name, IPSET_MAXNAMELEN);
+ atomic_set(&to->ref, from_ref);
+
+ ip_set_list[from_id] = to;
+ ip_set_list[to_id] = from;
+
+ return 0;
+}
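+
+/* Worked example: if "foo" sits at index 0 with two references and
+ * "bar" at index 3 with none, then after a successful swap
+ * ip_set_list[0] holds the old "bar" set now named "foo" and carrying
+ * the two references, while ip_set_list[3] holds the old "foo" set
+ * renamed to "bar". Kernel users that resolved "foo" to index 0 keep
+ * a valid index and from now on match against the swapped-in set.
+ */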
+
+/* List/save set data */
+
+#define DUMP_INIT 0L
+#define DUMP_ALL 1L
+#define DUMP_ONE 2L
+#define DUMP_LAST 3L
+
+static int
+ip_set_dump_done(struct netlink_callback *cb)
+{
+ if (cb->args[2]) {
+ pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
+ __ip_set_put((ip_set_id_t) cb->args[1]);
+ }
+ return 0;
+}
+
+static inline void
+dump_attrs(struct nlmsghdr *nlh)
+{
+ const struct nlattr *attr;
+ int rem;
+
+ pr_debug("dump nlmsg\n");
+ nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
+ pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
+ }
+}
+
+static int
+dump_init(struct netlink_callback *cb)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
+ int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+ struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+ struct nlattr *attr = (void *)nlh + min_len;
+ ip_set_id_t index;
+
+ /* Second pass, so parser can't fail */
+ nla_parse(cda, IPSET_ATTR_CMD_MAX,
+ attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
+
+ /* cb->args[0] : dump single set/all sets
+ * [1] : set index
+ * [..]: type specific
+ */
+
+ if (!cda[IPSET_ATTR_SETNAME]) {
+ cb->args[0] = DUMP_ALL;
+ return 0;
+ }
+
+ index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
+ if (index == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ cb->args[0] = DUMP_ONE;
+ cb->args[1] = index;
+ return 0;
+}
+
+static int
+ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ ip_set_id_t index = IPSET_INVALID_ID, max;
+ struct ip_set *set = NULL;
+ struct nlmsghdr *nlh = NULL;
+ unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+ int ret = 0;
+
+ if (cb->args[0] == DUMP_INIT) {
+ ret = dump_init(cb);
+ if (ret < 0) {
+ nlh = nlmsg_hdr(cb->skb);
+ /* We have to create and send the error message
+ * manually :-( */
+ if (nlh->nlmsg_flags & NLM_F_ACK)
+ netlink_ack(cb->skb, nlh, ret);
+ return ret;
+ }
+ }
+
+ if (cb->args[1] >= ip_set_max)
+ goto out;
+
+ pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
+ max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+ for (; cb->args[1] < max; cb->args[1]++) {
+ index = (ip_set_id_t) cb->args[1];
+ set = ip_set_list[index];
+ if (set == NULL) {
+ if (cb->args[0] == DUMP_ONE) {
+ ret = -ENOENT;
+ goto out;
+ }
+ continue;
+ }
+ /* When dumping all sets, we must dump "sorted"
+ * so that lists (unions of sets) are dumped last.
+ */
+ if (cb->args[0] != DUMP_ONE &&
+ !((cb->args[0] == DUMP_ALL) ^
+ (set->type->features & IPSET_DUMP_LAST)))
+ continue;
+ pr_debug("List set: %s\n", set->name);
+ if (!cb->args[2]) {
+ /* Start listing: make sure set won't be destroyed */
+ pr_debug("reference set\n");
+ __ip_set_get(index);
+ }
+ nlh = start_msg(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, flags,
+ IPSET_CMD_LIST);
+ if (!nlh) {
+ ret = -EMSGSIZE;
+ goto release_refcount;
+ }
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+ switch (cb->args[2]) {
+ case 0:
+ /* Core header data */
+ NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
+ set->type->name);
+ NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
+ set->family);
+ NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
+ set->type->revision);
+ ret = set->variant->head(set, skb);
+ if (ret < 0)
+ goto release_refcount;
+ /* Fall through and add elements */
+ default:
+ read_lock_bh(&set->lock);
+ ret = set->variant->list(set, skb, cb);
+ read_unlock_bh(&set->lock);
+ if (!cb->args[2]) {
+ /* Set is done, proceed with next one */
+ if (cb->args[0] == DUMP_ONE)
+ cb->args[1] = IPSET_INVALID_ID;
+ else
+ cb->args[1]++;
+ }
+ goto release_refcount;
+ }
+ }
+ goto out;
+
+nla_put_failure:
+ ret = -EFAULT;
+release_refcount:
+ /* If there was an error or set is done, release set */
+ if (ret || !cb->args[2]) {
+ pr_debug("release set %s\n", ip_set_list[index]->name);
+ __ip_set_put(index);
+ }
+
+ /* If we dump all sets, continue with dumping last ones */
+ if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
+ cb->args[0] = DUMP_LAST;
+
+out:
+ if (nlh) {
+ nlmsg_end(skb, nlh);
+ pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
+ dump_attrs(nlh);
+ }
+
+ return ret < 0 ? ret : skb->len;
+}
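+
+/* Dump state sketch: cb->args[0] starts as DUMP_INIT and is set by
+ * dump_init() to DUMP_ONE or DUMP_ALL; when an all-sets dump has
+ * walked the plain types, it is bumped to DUMP_LAST so the sets
+ * flagged with IPSET_DUMP_LAST (unions of sets) are listed in a
+ * second pass. cb->args[1] is the current set index and cb->args[2]
+ * the type-private cursor inside one set, zero meaning the set is
+ * finished.
+ */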
+
+static int
+ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ if (unlikely(protocol_failed(attr)))
+ return -IPSET_ERR_PROTOCOL;
+
+ return netlink_dump_start(ctnl, skb, nlh,
+ ip_set_dump_start,
+ ip_set_dump_done);
+}
+
+/* Add, del and test */
+
+static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
+ [IPSET_ATTR_ADT] = { .type = NLA_NESTED },
+};
+
+static int
+call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
+ struct nlattr *tb[], enum ipset_adt adt,
+ u32 flags, bool use_lineno)
+{
+ int ret, retried = 0;
+ u32 lineno = 0;
+ bool eexist = flags & IPSET_FLAG_EXIST;
+
+ do {
+ write_lock_bh(&set->lock);
+ ret = set->variant->uadt(set, tb, adt, &lineno, flags);
+ write_unlock_bh(&set->lock);
+ } while (ret == -EAGAIN &&
+ set->variant->resize &&
+ (ret = set->variant->resize(set, retried++)) == 0);
+
+ if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
+ return 0;
+ if (lineno && use_lineno) {
+ /* Error in restore/batch mode: send back lineno */
+ struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
+ struct sk_buff *skb2;
+ struct nlmsgerr *errmsg;
+ size_t payload = sizeof(*errmsg) + nlmsg_len(nlh);
+ int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+ struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+ struct nlattr *cmdattr;
+ u32 *errline;
+
+ skb2 = nlmsg_new(payload, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+ rep = __nlmsg_put(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
+ errmsg = nlmsg_data(rep);
+ errmsg->error = ret;
+ memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
+ cmdattr = (void *)&errmsg->msg + min_len;
+
+ nla_parse(cda, IPSET_ATTR_CMD_MAX,
+ cmdattr, nlh->nlmsg_len - min_len,
+ ip_set_adt_policy);
+
+ errline = nla_data(cda[IPSET_ATTR_LINENO]);
+
+ *errline = lineno;
+
+ netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ /* Signal netlink not to send its ACK/errmsg. */
+ return -EINTR;
+ }
+
+ return ret;
+}
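+
+/* Example of the lineno feedback above: when a batch fed by
+ * "ipset restore" fails at, say, line 42, the original request is
+ * echoed back inside an NLMSG_ERROR message with its
+ * IPSET_ATTR_LINENO attribute rewritten in place to 42, so userspace
+ * can point at the offending line instead of printing a bare error
+ * code. (Sketch derived from the code above; the reporting itself is
+ * done by the ipset tool.)
+ */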
+
+static int
+ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+ const struct nlattr *nla;
+ u32 flags = flag_exist(nlh);
+ bool use_lineno;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ !((attr[IPSET_ATTR_DATA] != NULL) ^
+ (attr[IPSET_ATTR_ADT] != NULL)) ||
+ (attr[IPSET_ATTR_DATA] != NULL &&
+ !flag_nested(attr[IPSET_ATTR_DATA])) ||
+ (attr[IPSET_ATTR_ADT] != NULL &&
+ (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+ attr[IPSET_ATTR_LINENO] == NULL))))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+
+ use_lineno = !!attr[IPSET_ATTR_LINENO];
+ if (attr[IPSET_ATTR_DATA]) {
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+ attr[IPSET_ATTR_DATA],
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags,
+ use_lineno);
+ } else {
+ int nla_rem;
+
+ nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+ memset(tb, 0, sizeof(tb));
+ if (nla_type(nla) != IPSET_ATTR_DATA ||
+ !flag_nested(nla) ||
+ nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_ADD,
+ flags, use_lineno);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int
+ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+ const struct nlattr *nla;
+ u32 flags = flag_exist(nlh);
+ bool use_lineno;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ !((attr[IPSET_ATTR_DATA] != NULL) ^
+ (attr[IPSET_ATTR_ADT] != NULL)) ||
+ (attr[IPSET_ATTR_DATA] != NULL &&
+ !flag_nested(attr[IPSET_ATTR_DATA])) ||
+ (attr[IPSET_ATTR_ADT] != NULL &&
+ (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+ attr[IPSET_ATTR_LINENO] == NULL))))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+
+ use_lineno = !!attr[IPSET_ATTR_LINENO];
+ if (attr[IPSET_ATTR_DATA]) {
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+ attr[IPSET_ATTR_DATA],
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags,
+ use_lineno);
+ } else {
+ int nla_rem;
+
+ nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+ memset(tb, 0, sizeof(tb));
+ if (nla_type(nla) != IPSET_ATTR_DATA ||
+ !flag_nested(nla) ||
+ nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+ ret = call_ad(ctnl, skb, set, tb, IPSET_DEL,
+ flags, use_lineno);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static int
+ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct ip_set *set;
+ struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL ||
+ attr[IPSET_ATTR_DATA] == NULL ||
+ !flag_nested(attr[IPSET_ATTR_DATA])))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (set == NULL)
+ return -ENOENT;
+
+ if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
+ set->type->adt_policy))
+ return -IPSET_ERR_PROTOCOL;
+
+ read_lock_bh(&set->lock);
+ ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0);
+ read_unlock_bh(&set->lock);
+ /* Userspace can't trigger element to be re-added */
+ if (ret == -EAGAIN)
+ ret = 1;
+
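+ /* Map the result: > 0 means the element was found (success),
+ * 0 means it is not in the set (-IPSET_ERR_EXIST) and
+ * < 0 is a real error. */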
+ return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+}
+
+/* Get the header data of a set */
+
+static int
+ip_set_header(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ const struct ip_set *set;
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ ip_set_id_t index;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_SETNAME] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ index = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+ if (index == IPSET_INVALID_ID)
+ return -ENOENT;
+ set = ip_set_list[index];
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_HEADER);
+ if (!nlh2)
+ goto nlmsg_failure;
+ NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
+ NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
+ NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
+ NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->type->revision);
+ nlmsg_end(skb2, nlh2);
+
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+/* Get type data */
+
+static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
+ .len = IPSET_MAXNAMELEN - 1 },
+ [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
+};
+
+static int
+ip_set_type(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ u8 family, min, max;
+ const char *typename;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ attr[IPSET_ATTR_TYPENAME] == NULL ||
+ attr[IPSET_ATTR_FAMILY] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+ typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+ ret = find_set_type_minmax(typename, family, &min, &max);
+ if (ret)
+ return ret;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_TYPE);
+ if (!nlh2)
+ goto nlmsg_failure;
+ NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
+ NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
+ NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
+ NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+ nlmsg_end(skb2, nlh2);
+
+ pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+/* Get protocol version */
+
+static const struct nla_policy
+ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+};
+
+static int
+ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ int ret = 0;
+
+ if (unlikely(attr[IPSET_ATTR_PROTOCOL] == NULL))
+ return -IPSET_ERR_PROTOCOL;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_PROTOCOL);
+ if (!nlh2)
+ goto nlmsg_failure;
+ NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+ nlmsg_end(skb2, nlh2);
+
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+ [IPSET_CMD_CREATE] = {
+ .call = ip_set_create,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_create_policy,
+ },
+ [IPSET_CMD_DESTROY] = {
+ .call = ip_set_destroy,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_FLUSH] = {
+ .call = ip_set_flush,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_RENAME] = {
+ .call = ip_set_rename,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname2_policy,
+ },
+ [IPSET_CMD_SWAP] = {
+ .call = ip_set_swap,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname2_policy,
+ },
+ [IPSET_CMD_LIST] = {
+ .call = ip_set_dump,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_SAVE] = {
+ .call = ip_set_dump,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_ADD] = {
+ .call = ip_set_uadd,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_adt_policy,
+ },
+ [IPSET_CMD_DEL] = {
+ .call = ip_set_udel,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_adt_policy,
+ },
+ [IPSET_CMD_TEST] = {
+ .call = ip_set_utest,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_adt_policy,
+ },
+ [IPSET_CMD_HEADER] = {
+ .call = ip_set_header,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_TYPE] = {
+ .call = ip_set_type,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_type_policy,
+ },
+ [IPSET_CMD_PROTOCOL] = {
+ .call = ip_set_protocol,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_protocol_policy,
+ },
+};
+
+static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
+ .name = "ip_set",
+ .subsys_id = NFNL_SUBSYS_IPSET,
+ .cb_count = IPSET_MSG_MAX,
+ .cb = ip_set_netlink_subsys_cb,
+};
+
+/* Interface to iptables/ip6tables */
+
+static int
+ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
+{
+ unsigned *op;
+ void *data;
+ int copylen = *len, ret = 0;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (optval != SO_IP_SET)
+ return -EBADF;
+ if (*len < sizeof(unsigned))
+ return -EINVAL;
+
+ data = vmalloc(*len);
+ if (!data)
+ return -ENOMEM;
+ if (copy_from_user(data, user, *len) != 0) {
+ ret = -EFAULT;
+ goto done;
+ }
+ op = (unsigned *) data;
+
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
+ struct ip_set_req_version *req_version = data;
+ if (req_version->version != IPSET_PROTOCOL) {
+ ret = -EPROTO;
+ goto done;
+ }
+ }
+
+ switch (*op) {
+ case IP_SET_OP_VERSION: {
+ struct ip_set_req_version *req_version = data;
+
+ if (*len != sizeof(struct ip_set_req_version)) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ req_version->version = IPSET_PROTOCOL;
+ ret = copy_to_user(user, req_version,
+ sizeof(struct ip_set_req_version));
+ goto done;
+ }
+ case IP_SET_OP_GET_BYNAME: {
+ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set)) {
+ ret = -EINVAL;
+ goto done;
+ }
+ req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+ nfnl_lock();
+ req_get->set.index = find_set_id(req_get->set.name);
+ nfnl_unlock();
+ goto copy;
+ }
+ case IP_SET_OP_GET_BYINDEX: {
+ struct ip_set_req_get_set *req_get = data;
+
+ if (*len != sizeof(struct ip_set_req_get_set) ||
+ req_get->set.index >= ip_set_max) {
+ ret = -EINVAL;
+ goto done;
+ }
+ nfnl_lock();
+ strncpy(req_get->set.name,
+ ip_set_list[req_get->set.index]
+ ? ip_set_list[req_get->set.index]->name : "",
+ IPSET_MAXNAMELEN);
+ nfnl_unlock();
+ goto copy;
+ }
+ default:
+ ret = -EBADMSG;
+ goto done;
+ } /* end of switch(op) */
+
+copy:
+ ret = copy_to_user(user, data, copylen);
+
+done:
+ vfree(data);
+ if (ret > 0)
+ ret = 0;
+ return ret;
+}
+
+static struct nf_sockopt_ops so_set __read_mostly = {
+ .pf = PF_INET,
+ .get_optmin = SO_IP_SET,
+ .get_optmax = SO_IP_SET + 1,
+ .get = &ip_set_sockfn_get,
+ .owner = THIS_MODULE,
+};
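+
+/* Userspace sketch (assuming the request structures from the ip_set.h
+ * userspace header): the iptables set match resolves names through
+ * this getsockopt() interface instead of netlink, roughly
+ *
+ *	struct ip_set_req_version req = { .op = IP_SET_OP_VERSION };
+ *	socklen_t len = sizeof(req);
+ *	getsockopt(sockfd, SOL_IP, SO_IP_SET, &req, &len);
+ *	// req.version now holds IPSET_PROTOCOL on success
+ */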
+
+static int __init
+ip_set_init(void)
+{
+ int ret;
+
+ if (max_sets)
+ ip_set_max = max_sets;
+ if (ip_set_max >= IPSET_INVALID_ID)
+ ip_set_max = IPSET_INVALID_ID - 1;
+
+ ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max,
+ GFP_KERNEL);
+ if (!ip_set_list) {
+ pr_err("ip_set: Unable to create ip_set_list\n");
+ return -ENOMEM;
+ }
+
+ ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+ if (ret != 0) {
+ pr_err("ip_set: cannot register with nfnetlink.\n");
+ kfree(ip_set_list);
+ return ret;
+ }
+ ret = nf_register_sockopt(&so_set);
+ if (ret != 0) {
+ pr_err("SO_SET registry failed: %d\n", ret);
+ nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+ kfree(ip_set_list);
+ return ret;
+ }
+
+ pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+ return 0;
+}
+
+static void __exit
+ip_set_fini(void)
+{
+ /* There can't be any existing set */
+ nf_unregister_sockopt(&so_set);
+ nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+ kfree(ip_set_list);
+ pr_debug("these are the famous last words\n");
+}
+
+module_init(ip_set_init);
+module_exit(ip_set_fini);
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
new file mode 100644
index 0000000..8d52272
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -0,0 +1,141 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Get Layer-4 data from the packets */
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include <linux/netfilter/ipset/ip_set_getport.h>
+
+/* We must handle non-linear skbs */
+static bool
+get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
+ bool src, __be16 *port, u8 *proto)
+{
+ switch (protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr _tcph;
+ const struct tcphdr *th;
+
+ th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ /* No choice either */
+ return false;
+
+ *port = src ? th->source : th->dest;
+ break;
+ }
+ case IPPROTO_UDP: {
+ struct udphdr _udph;
+ const struct udphdr *uh;
+
+ uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
+ if (uh == NULL)
+ /* No choice either */
+ return false;
+
+ *port = src ? uh->source : uh->dest;
+ break;
+ }
+ case IPPROTO_ICMP: {
+ struct icmphdr _ich;
+ const struct icmphdr *ic;
+
+ ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+ if (ic == NULL)
+ return false;
+
+ *port = (__force __be16)htons((ic->type << 8) | ic->code);
+ break;
+ }
+ case IPPROTO_ICMPV6: {
+ struct icmp6hdr _ich;
+ const struct icmp6hdr *ic;
+
+ ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+ if (ic == NULL)
+ return false;
+
+ *port = (__force __be16)
+ htons((ic->icmp6_type << 8) | ic->icmp6_code);
+ break;
+ }
+ default:
+ break;
+ }
+ *proto = protocol;
+
+ return true;
+}
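+
+/* For ICMP/ICMPv6 there is no real port; the (type, code) pair is
+ * packed into the port slot instead. Worked example: an ICMP echo
+ * request (type 8, code 0) is stored as htons(8 << 8 | 0), i.e.
+ * htons(0x0800).
+ */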
+
+bool
+ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ unsigned int protooff = ip_hdrlen(skb);
+ int protocol = iph->protocol;
+
+ /* See comments at tcp_match in ip_tables.c */
+ if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+ return false;
+
+ return get_port(skb, protocol, protooff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+bool
+ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+ __be16 *port, u8 *proto)
+{
+ int protoff;
+ u8 nexthdr;
+
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+ if (protoff < 0)
+ return false;
+
+ return get_port(skb, nexthdr, protoff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
+#endif
+
+bool
+ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
+{
+ bool ret;
+ u8 proto;
+
+ switch (pf) {
+ case AF_INET:
+ ret = ip_set_get_ip4_port(skb, src, port, &proto);
+ break;
+ case AF_INET6:
+ ret = ip_set_get_ip6_port(skb, src, port, &proto);
+ break;
+ default:
+ return false;
+ }
+ if (!ret)
+ return ret;
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
new file mode 100644
index 0000000..43bcce2
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -0,0 +1,464 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip");
+
+/* Type specific function prefix */
+#define TYPE hash_ip
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ip4_same_set hash_ip_same_set
+#define hash_ip6_same_set hash_ip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ip4_elem {
+ __be32 ip;
+};
+
+/* Member elements with timeout support */
+struct hash_ip4_telem {
+ __be32 ip;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ip4_data_equal(const struct hash_ip4_elem *ip1,
+ const struct hash_ip4_elem *ip2)
+{
+ return ip1->ip == ip2->ip;
+}
+
+static inline bool
+hash_ip4_data_isnull(const struct hash_ip4_elem *elem)
+{
+ return elem->ip == 0;
+}
+
+static inline void
+hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src)
+{
+ dst->ip = src->ip;
+}
+
+/* Zero valued IP addresses cannot be stored */
+static inline void
+hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
+{
+ elem->ip = 0;
+}
+
+static inline bool
+hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+ const struct hash_ip4_telem *tdata =
+ (const struct hash_ip4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_NETMASK
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ __be32 ip;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip);
+ ip &= ip_set_netmask(h->netmask);
+ if (ip == 0)
+ return -EINVAL;
+
+ return adtfn(set, &ip, h->timeout);
+}
+
+static int
+hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ u32 ip, ip_to, hosts, timeout = h->timeout;
+ __be32 nip;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ip &= ip_set_hostmask(h->netmask);
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST) {
+ nip = htonl(ip);
+ if (nip == 0)
+ return -IPSET_ERR_HASH_ELEM;
+ return adtfn(set, &nip, timeout);
+ }
+
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
+
+ for (; !before(ip_to, ip); ip += hosts) {
+ nip = htonl(ip);
+ if (nip == 0)
+ return -IPSET_ERR_HASH_ELEM;
+ ret = adtfn(set, &nip, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
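+
+/* Range example: with netmask 24 the step above is
+ * hosts = 2 << (32 - 24 - 1) == 2^8 == 256, so adding the range
+ * 192.168.0.0-192.168.1.255 stores just the two /24-masked elements
+ * 192.168.0.0 and 192.168.1.0.
+ */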
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout &&
+ x->netmask == y->netmask;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ip6_elem {
+ union nf_inet_addr ip;
+};
+
+struct hash_ip6_telem {
+ union nf_inet_addr ip;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
+ const struct hash_ip6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0;
+}
+
+static inline bool
+hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
+{
+ return ipv6_addr_any(&elem->ip.in6);
+}
+
+static inline void
+hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
+{
+ ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+}
+
+static inline void
+hash_ip6_data_zero_out(struct hash_ip6_elem *elem)
+{
+ ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0);
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static bool
+hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+ const struct hash_ip6_telem *e =
+ (const struct hash_ip6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ union nf_inet_addr ip;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip.in6);
+ ip6_netmask(&ip, h->netmask);
+ if (ipv6_addr_any(&ip.in6))
+ return -EINVAL;
+
+ return adtfn(set, &ip, h->timeout);
+}
+
+static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+};
+
+static int
+hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ union nf_inet_addr ip;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip);
+ if (ret)
+ return ret;
+
+ ip6_netmask(&ip, h->netmask);
+ if (ipv6_addr_any(&ip.in6))
+ return -IPSET_ERR_HASH_ELEM;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ ret = adtfn(set, &ip, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:ip type of sets */
+
+static int
+hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 netmask, hbits;
+ struct ip_set_hash *h;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+ netmask = set->family == AF_INET ? 32 : 128;
+ pr_debug("Create set %s with family %s\n",
+ set->name, set->family == AF_INET ? "inet" : "inet6");
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ if (tb[IPSET_ATTR_NETMASK]) {
+ netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+ if ((set->family == AF_INET && netmask > 32) ||
+ (set->family == AF_INET6 && netmask > 128) ||
+ netmask == 0)
+ return -IPSET_ERR_INVALID_NETMASK;
+ }
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ h->netmask = netmask;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ip4_tvariant : &hash_ip6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ip4_gc_init(set);
+ else
+ hash_ip6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ip4_variant : &hash_ip6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
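+
+/* Sizing sketch: with the default hashsize of 1024, htable_bits()
+ * yields 10 bits (assuming it maps hashsize to the matching
+ * power-of-two exponent), so jhash_size(10) == 1024 initial buckets
+ * are allocated; the table only grows later through the resize
+ * callback.
+ */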
+
+static struct ip_set_type hash_ip_type __read_mostly = {
+ .name = "hash:ip",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ip_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_NETMASK] = { .type = NLA_U8 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ip_init(void)
+{
+ return ip_set_type_register(&hash_ip_type);
+}
+
+static void __exit
+hash_ip_fini(void)
+{
+ ip_set_type_unregister(&hash_ip_type);
+}
+
+module_init(hash_ip_init);
+module_exit(hash_ip_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
new file mode 100644
index 0000000..adbe787
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -0,0 +1,544 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port");
+
+/* Type specific function prefix */
+#define TYPE hash_ipport
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipport4_same_set hash_ipport_same_set
+#define hash_ipport6_same_set hash_ipport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipport4_elem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipport4_telem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
+ const struct hash_ipport4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport4_data_isnull(const struct hash_ipport4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipport4_data_copy(struct hash_ipport4_elem *dst,
+ const struct hash_ipport4_elem *src)
+{
+ dst->ip = src->ip;
+ dst->port = src->port;
+ dst->proto = src->proto;
+}
+
+static inline void
+hash_ipport4_data_zero_out(struct hash_ipport4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipport4_data_list(struct sk_buff *skb,
+ const struct hash_ipport4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipport4_data_tlist(struct sk_buff *skb,
+ const struct hash_ipport4_elem *data)
+{
+ const struct hash_ipport4_telem *tdata =
+ (const struct hash_ipport4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport4_elem data = { };
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport4_elem data = { };
+ u32 ip, ip_to, p, port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+ tb[IPSET_ATTR_PORT_TO])) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip = ntohl(data.ip);
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ port = ntohs(data.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ } else
+ port_to = port;
+
+ for (; !before(ip_to, ip); ip++)
+ for (p = port; p <= port_to; p++) {
+ data.ip = htonl(ip);
+ data.port = htons(p);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
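+
+/* The loops above expand the cross product of the IP and port ranges:
+ * e.g. two addresses with ports 80-81 (roughly
+ * "ipset add foo 10.0.0.1-10.0.0.2,tcp:80-81" in userspace syntax)
+ * store four (ip, port, proto) elements. For protocols other than
+ * TCP/UDP no range expansion happens and, unless it is ICMP with its
+ * packed type/code, the port is forced to 0.
+ */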
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipport6_elem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipport6_telem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
+ const struct hash_ipport6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport6_data_isnull(const struct hash_ipport6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipport6_data_copy(struct hash_ipport6_elem *dst,
+ const struct hash_ipport6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipport6_data_zero_out(struct hash_ipport6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipport6_data_list(struct sk_buff *skb,
+ const struct hash_ipport6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipport6_data_tlist(struct sk_buff *skb,
+ const struct hash_ipport6_elem *data)
+{
+ const struct hash_ipport6_telem *e =
+ (const struct hash_ipport6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport6_elem data = { };
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipport6_elem data = { };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:ip,port type of sets */
+
+static int
+hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ipport4_gc_init(set);
+ else
+ hash_ipport6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ipport4_variant : &hash_ipport6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ipport_type __read_mostly = {
+ .name = "hash:ip,port",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+ .dimension = IPSET_DIM_TWO,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ipport_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ipport_init(void)
+{
+ return ip_set_type_register(&hash_ipport_type);
+}
+
+static void __exit
+hash_ipport_fini(void)
+{
+ ip_set_type_unregister(&hash_ipport_type);
+}
+
+module_init(hash_ipport_init);
+module_exit(hash_ipport_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
new file mode 100644
index 0000000..22e23ab
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -0,0 +1,562 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,ip");
+
+/* Type specific function prefix */
+#define TYPE hash_ipportip
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportip4_same_set hash_ipportip_same_set
+#define hash_ipportip6_same_set hash_ipportip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportip4_elem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportip4_telem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
+ const struct hash_ipportip4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->ip2 == ip2->ip2 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst,
+ const struct hash_ipportip4_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipportip4_data_list(struct sk_buff *skb,
+ const struct hash_ipportip4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportip4_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportip4_elem *data)
+{
+ const struct hash_ipportip4_telem *tdata =
+ (const struct hash_ipportip4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, tdata->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip4_elem data = { };
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip4_elem data = { };
+ u32 ip, ip_to, p, port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+ tb[IPSET_ATTR_PORT_TO])) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip = ntohl(data.ip);
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ port = ntohs(data.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ } else
+ port_to = port;
+
+ for (; !before(ip_to, ip); ip++)
+ for (p = port; p <= port_to; p++) {
+ data.ip = htonl(ip);
+ data.port = htons(p);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportip6_elem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+};
+
+struct hash_ipportip6_telem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 proto;
+ u8 padding;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
+ const struct hash_ipportip6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst,
+ const struct hash_ipportip6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipportip6_data_list(struct sk_buff *skb,
+ const struct hash_ipportip6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportip6_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportip6_elem *data)
+{
+ const struct hash_ipportip6_telem *e =
+ (const struct hash_ipportip6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &e->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, e->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, e->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip6_elem data = { };
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportip6_elem data = { };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:ip,port,ip type of sets */
+
+static int
+hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ipportip4_gc_init(set);
+ else
+ hash_ipportip6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ipportip4_variant : &hash_ipportip6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ipportip_type __read_mostly = {
+ .name = "hash:ip,port,ip",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+ .dimension = IPSET_DIM_THREE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ipportip_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ipportip_init(void)
+{
+ return ip_set_type_register(&hash_ipportip_type);
+}
+
+static void __exit
+hash_ipportip_fini(void)
+{
+ ip_set_type_unregister(&hash_ipportip_type);
+}
+
+module_init(hash_ipportip_init);
+module_exit(hash_ipportip_fini);
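
The IPv4 uadt above also accepts an IPSET_ATTR_CIDR and turns it into an inclusive address range via ip_set_hostmask(). A self-contained sketch of that conversion follows; it assumes ip_set_hostmask(cidr) yields the host-order mask ~0 << (32 - cidr), which is an assumption here since the helper lives in the ipset headers, outside this diff:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed equivalent of ip_set_hostmask(), valid for 1 <= cidr <= 32 */
	static uint32_t hostmask(uint8_t cidr)
	{
		return ~0u << (32 - cidr);
	}

	int main(void)
	{
		uint32_t ip = 0xc0a80142;	/* 192.168.1.66, host order */
		uint8_t cidr = 24;
		uint32_t from = ip & hostmask(cidr);
		uint32_t to = from | ~hostmask(cidr);

		/* 192.168.1.0 .. 192.168.1.255, iterated one address at a time */
		printf("%08x .. %08x (%u addresses)\n", from, to, to - from + 1);
		return 0;
	}
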
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
new file mode 100644
index 0000000..6033e8b
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -0,0 +1,628 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,net type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,net");
+
+/* Type specific function prefix */
+#define TYPE hash_ipportnet
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportnet4_same_set hash_ipportnet_same_set
+#define hash_ipportnet6_same_set hash_ipportnet_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportnet4_elem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportnet4_telem {
+ __be32 ip;
+ __be32 ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
+ const struct hash_ipportnet4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->ip2 == ip2->ip2 &&
+ ip1->cidr == ip2->cidr &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet4_data_isnull(const struct hash_ipportnet4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
+ const struct hash_ipportnet4_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
+{
+ elem->ip2 &= ip_set_netmask(cidr);
+ elem->cidr = cidr;
+}
+
+static inline void
+hash_ipportnet4_data_zero_out(struct hash_ipportnet4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_ipportnet4_data_list(struct sk_buff *skb,
+ const struct hash_ipportnet4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportnet4_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportnet4_elem *data)
+{
+ const struct hash_ipportnet4_telem *tdata =
+ (const struct hash_ipportnet4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, tdata->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, tdata->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet4_elem data =
+ { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+ data.ip2 &= ip_set_netmask(data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
+ u32 ip, ip_to, p, port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR2])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ data.ip2 &= ip_set_netmask(data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+ tb[IPSET_ATTR_PORT_TO])) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ ip = ntohl(data.ip);
+ if (tb[IPSET_ATTR_IP_TO]) {
+ ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+ if (ret)
+ return ret;
+ if (ip > ip_to)
+ swap(ip, ip_to);
+ } else if (tb[IPSET_ATTR_CIDR]) {
+ u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (cidr > 32)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip &= ip_set_hostmask(cidr);
+ ip_to = ip | ~ip_set_hostmask(cidr);
+ } else
+ ip_to = ip;
+
+ port = ntohs(data.port);
+ if (tb[IPSET_ATTR_PORT_TO]) {
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+ } else
+ port_to = port;
+
+ for (; !before(ip_to, ip); ip++)
+ for (p = port; p <= port_to; p++) {
+ data.ip = htonl(ip);
+ data.port = htons(p);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportnet6_elem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+};
+
+struct hash_ipportnet6_telem {
+ union nf_inet_addr ip;
+ union nf_inet_addr ip2;
+ __be16 port;
+ u8 cidr;
+ u8 proto;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
+ const struct hash_ipportnet6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+ ip1->cidr == ip2->cidr &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet6_data_isnull(const struct hash_ipportnet6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
+ const struct hash_ipportnet6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
+{
+ ip6_netmask(&elem->ip2, cidr);
+ elem->cidr = cidr;
+}
+
+static bool
+hash_ipportnet6_data_list(struct sk_buff *skb,
+ const struct hash_ipportnet6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_ipportnet6_data_tlist(struct sk_buff *skb,
+ const struct hash_ipportnet6_elem *data)
+{
+ const struct hash_ipportnet6_telem *e =
+ (const struct hash_ipportnet6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &e->ip2);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, e->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, e->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, e->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet6_elem data =
+ { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+ ip6_netmask(&data.ip2, data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ tb[IPSET_ATTR_IP_TO] ||
+ tb[IPSET_ATTR_CIDR]))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR2])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ ip6_netmask(&data.ip2, data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:ip,port,net type of sets */
+
+static int
+hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h)
+ + sizeof(struct ip_set_hash_nets)
+ * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_ipportnet4_tvariant
+ : &hash_ipportnet6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_ipportnet4_gc_init(set);
+ else
+ hash_ipportnet6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_ipportnet_type __read_mostly = {
+ .name = "hash:ip,port,net",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+ .dimension = IPSET_DIM_THREE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_ipportnet_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
+ [IPSET_ATTR_IP2] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_ipportnet_init(void)
+{
+ return ip_set_type_register(&hash_ipportnet_type);
+}
+
+static void __exit
+hash_ipportnet_fini(void)
+{
+ ip_set_type_unregister(&hash_ipportnet_type);
+}
+
+module_init(hash_ipportnet_init);
+module_exit(hash_ipportnet_fini);
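
hash:ip,port,net stores the second address pre-masked (data.ip2 &= ip_set_netmask(data.cidr)), so a lookup only has to mask the packet address with the same cidr and compare. A self-contained sketch of that idea, kept in host byte order for readability; netmask() is an illustrative stand-in for ip_set_netmask(), not the kernel helper itself:

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-in for ip_set_netmask(), 1 <= cidr <= 32 */
	static uint32_t netmask(uint8_t cidr)
	{
		return ~0u << (32 - cidr);
	}

	/* A stored element keeps the pre-masked network plus its cidr;
	 * matching masks the candidate address with the same cidr. */
	static bool net_match(uint32_t stored_net, uint8_t cidr, uint32_t addr)
	{
		return (addr & netmask(cidr)) == stored_net;
	}

	int main(void)
	{
		uint32_t net = 0x0a000000 & netmask(16);	/* 10.0.0.0/16 */

		printf("%d\n", net_match(net, 16, 0x0a00ff01));	/* 10.0.255.1 -> 1 */
		printf("%d\n", net_match(net, 16, 0x0a010001));	/* 10.1.0.1   -> 0 */
		return 0;
	}
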
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
new file mode 100644
index 0000000..c4db202
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -0,0 +1,458 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net type of IP sets");
+MODULE_ALIAS("ip_set_hash:net");
+
+/* Type specific function prefix */
+#define TYPE hash_net
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_net4_same_set hash_net_same_set
+#define hash_net6_same_set hash_net_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_net4_elem {
+ __be32 ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_net4_telem {
+ __be32 ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_net4_data_equal(const struct hash_net4_elem *ip1,
+ const struct hash_net4_elem *ip2)
+{
+ return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net4_data_isnull(const struct hash_net4_elem *elem)
+{
+ return elem->cidr == 0;
+}
+
+static inline void
+hash_net4_data_copy(struct hash_net4_elem *dst,
+ const struct hash_net4_elem *src)
+{
+ dst->ip = src->ip;
+ dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr)
+{
+ elem->ip &= ip_set_netmask(cidr);
+ elem->cidr = cidr;
+}
+
+/* Zero CIDR values cannot be stored */
+static inline void
+hash_net4_data_zero_out(struct hash_net4_elem *elem)
+{
+ elem->cidr = 0;
+}
+
+static bool
+hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+ const struct hash_net4_telem *tdata =
+ (const struct hash_net4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_NETS
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_net4_elem data =
+ { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ data.ip &= ip_set_netmask(data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_net4_elem data = { .cidr = HOST_MASK };
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ data.ip &= ip_set_netmask(data.cidr);
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ ret = adtfn(set, &data, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_net6_elem {
+ union nf_inet_addr ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+};
+
+struct hash_net6_telem {
+ union nf_inet_addr ip;
+ u16 padding0;
+ u8 padding1;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_net6_data_equal(const struct hash_net6_elem *ip1,
+ const struct hash_net6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net6_data_isnull(const struct hash_net6_elem *elem)
+{
+ return elem->cidr == 0;
+}
+
+static inline void
+hash_net6_data_copy(struct hash_net6_elem *dst,
+ const struct hash_net6_elem *src)
+{
+ ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+ dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net6_data_zero_out(struct hash_net6_elem *elem)
+{
+ elem->cidr = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
+{
+ ip6_netmask(&elem->ip, cidr);
+ elem->cidr = cidr;
+}
+
+static bool
+hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+ const struct hash_net6_telem *e =
+ (const struct hash_net6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_net6_elem data =
+ { .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6_netmask(&data.ip, data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_net6_elem data = { .cidr = HOST_MASK };
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+
+ ip6_netmask(&data.ip, data.cidr);
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ ret = adtfn(set, &data, timeout);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:net type of sets */
+
+static int
+hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ struct ip_set_hash *h;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h)
+ + sizeof(struct ip_set_hash_nets)
+ * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_net4_tvariant : &hash_net6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_net4_gc_init(set);
+ else
+ hash_net6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_net4_variant : &hash_net6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_net_type __read_mostly = {
+ .name = "hash:net",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_net_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_net_init(void)
+{
+ return ip_set_type_register(&hash_net_type);
+}
+
+static void __exit
+hash_net_fini(void)
+{
+ ip_set_type_unregister(&hash_net_type);
+}
+
+module_init(hash_net_init);
+module_exit(hash_net_fini);
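
Because a /8 and a /24 element hash to different buckets, kadt for the IP_SET_HASH_WITH_NETS types cannot mask with a single cidr; the machinery included from ip_set_ahash.h keeps a nets[] table of the prefix lengths in use and probes each one. A hedged sketch of that probe loop, with a linear array standing in for the hash table and all names illustrative:

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct stored { uint32_t net; uint8_t cidr; };

	static uint32_t netmask(uint8_t cidr)
	{
		return ~0u << (32 - cidr);	/* 1 <= cidr <= 32 */
	}

	/* One probe per prefix length in use; each probe masks the
	 * address to that cidr and looks for an exact element. */
	static bool match(const struct stored *set, int n,
			  const uint8_t *cidrs, int ncidr, uint32_t addr)
	{
		int i, j;

		for (i = 0; i < ncidr; i++) {
			uint32_t key = addr & netmask(cidrs[i]);
			for (j = 0; j < n; j++)	/* stands in for a hash probe */
				if (set[j].cidr == cidrs[i] && set[j].net == key)
					return true;
		}
		return false;
	}

	int main(void)
	{
		struct stored set[] = {
			{ 0x0a000000, 8 },	/* 10.0.0.0/8 */
			{ 0xc0a80100, 24 },	/* 192.168.1.0/24 */
		};
		uint8_t cidrs[] = { 24, 8 };	/* most specific first */

		printf("%d\n", match(set, 2, cidrs, 2, 0xc0a80105));	/* 1 */
		printf("%d\n", match(set, 2, cidrs, 2, 0xc0a80205));	/* 0 */
		return 0;
	}
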
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
new file mode 100644
index 0000000..34a1656
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -0,0 +1,578 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:net,port");
+
+/* Type specific function prefix */
+#define TYPE hash_netport
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_netport4_same_set hash_netport_same_set
+#define hash_netport6_same_set hash_netport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_netport4_elem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_netport4_telem {
+ __be32 ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
+ const struct hash_netport4_elem *ip2)
+{
+ return ip1->ip == ip2->ip &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto &&
+ ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport4_data_isnull(const struct hash_netport4_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_netport4_data_copy(struct hash_netport4_elem *dst,
+ const struct hash_netport4_elem *src)
+{
+ dst->ip = src->ip;
+ dst->port = src->port;
+ dst->proto = src->proto;
+ dst->cidr = src->cidr;
+}
+
+static inline void
+hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
+{
+ elem->ip &= ip_set_netmask(cidr);
+ elem->cidr = cidr;
+}
+
+static inline void
+hash_netport4_data_zero_out(struct hash_netport4_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static bool
+hash_netport4_data_list(struct sk_buff *skb,
+ const struct hash_netport4_elem *data)
+{
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_netport4_data_tlist(struct sk_buff *skb,
+ const struct hash_netport4_elem *data)
+{
+ const struct hash_netport4_telem *tdata =
+ (const struct hash_netport4_telem *)data;
+
+ NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, tdata->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(tdata->timeout)));
+
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF 4
+#define HOST_MASK 32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport4_elem data = {
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+ data.ip &= ip_set_netmask(data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport4_elem data = { .cidr = HOST_MASK };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+ data.ip &= ip_set_netmask(data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMP:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct ip_set_hash *x = a->data;
+ const struct ip_set_hash *y = b->data;
+
+ /* Resizing changes htable_bits, so we ignore it */
+ return x->maxelem == y->maxelem &&
+ x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_netport6_elem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+};
+
+struct hash_netport6_telem {
+ union nf_inet_addr ip;
+ __be16 port;
+ u8 proto;
+ u8 cidr;
+ unsigned long timeout;
+};
+
+static inline bool
+hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
+ const struct hash_netport6_elem *ip2)
+{
+ return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+ ip1->port == ip2->port &&
+ ip1->proto == ip2->proto &&
+ ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport6_data_isnull(const struct hash_netport6_elem *elem)
+{
+ return elem->proto == 0;
+}
+
+static inline void
+hash_netport6_data_copy(struct hash_netport6_elem *dst,
+ const struct hash_netport6_elem *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
+{
+ elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+ ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+ ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+ ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+ ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
+{
+ ip6_netmask(&elem->ip, cidr);
+ elem->cidr = cidr;
+}
+
+static bool
+hash_netport6_data_list(struct sk_buff *skb,
+ const struct hash_netport6_elem *data)
+{
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+static bool
+hash_netport6_data_tlist(struct sk_buff *skb,
+ const struct hash_netport6_elem *data)
+{
+ const struct hash_netport6_telem *e =
+ (const struct hash_netport6_telem *)data;
+
+ NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+ NLA_PUT_NET16(skb, IPSET_ATTR_PORT, e->port);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_PROTO, e->proto);
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(e->timeout)));
+ return 0;
+
+nla_put_failure:
+ return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF 6
+#define HOST_MASK 128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport6_elem data = {
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK };
+
+ if (data.cidr == 0)
+ return -EINVAL;
+ if (adt == IPSET_TEST)
+ data.cidr = HOST_MASK;
+
+ if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+ &data.port, &data.proto))
+ return -EINVAL;
+
+ ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+ ip6_netmask(&data.ip, data.cidr);
+
+ return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ const struct ip_set_hash *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_netport6_elem data = { .cidr = HOST_MASK };
+ u32 port, port_to;
+ u32 timeout = h->timeout;
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_IP] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+ if (ret)
+ return ret;
+
+ if (tb[IPSET_ATTR_CIDR])
+ data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!data.cidr)
+ return -IPSET_ERR_INVALID_CIDR;
+ ip6_netmask(&data.ip, data.cidr);
+
+ if (tb[IPSET_ATTR_PORT])
+ data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+ else
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_PROTO]) {
+ data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+ if (data.proto == 0)
+ return -IPSET_ERR_INVALID_PROTO;
+ } else
+ return -IPSET_ERR_MISSING_PROTO;
+
+ switch (data.proto) {
+ case IPPROTO_UDP:
+ case IPPROTO_TCP:
+ case IPPROTO_ICMPV6:
+ break;
+ default:
+ data.port = 0;
+ break;
+ }
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout(h->timeout))
+ return -IPSET_ERR_TIMEOUT;
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ if (adt == IPSET_TEST ||
+ !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+ !tb[IPSET_ATTR_PORT_TO]) {
+ ret = adtfn(set, &data, timeout);
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+ }
+
+ port = ntohs(data.port);
+ port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+ if (port > port_to)
+ swap(port, port_to);
+
+ for (; port <= port_to; port++) {
+ data.port = htons(port);
+ ret = adtfn(set, &data, timeout);
+
+ if (ret && !ip_set_eexist(ret, flags))
+ return ret;
+ else
+ ret = 0;
+ }
+ return ret;
+}
+
+/* Create hash:net,port type of sets */
+
+static int
+hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ struct ip_set_hash *h;
+ u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+ u8 hbits;
+
+ if (!(set->family == AF_INET || set->family == AF_INET6))
+ return -IPSET_ERR_INVALID_FAMILY;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_HASHSIZE]) {
+ hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+ if (hashsize < IPSET_MIMINAL_HASHSIZE)
+ hashsize = IPSET_MIMINAL_HASHSIZE;
+ }
+
+ if (tb[IPSET_ATTR_MAXELEM])
+ maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+ h = kzalloc(sizeof(*h)
+ + sizeof(struct ip_set_hash_nets)
+ * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->maxelem = maxelem;
+ get_random_bytes(&h->initval, sizeof(h->initval));
+ h->timeout = IPSET_NO_TIMEOUT;
+
+ hbits = htable_bits(hashsize);
+ h->table = ip_set_alloc(
+ sizeof(struct htable)
+ + jhash_size(hbits) * sizeof(struct hbucket));
+ if (!h->table) {
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->table->htable_bits = hbits;
+
+ set->data = h;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+ set->variant = set->family == AF_INET
+ ? &hash_netport4_tvariant : &hash_netport6_tvariant;
+
+ if (set->family == AF_INET)
+ hash_netport4_gc_init(set);
+ else
+ hash_netport6_gc_init(set);
+ } else {
+ set->variant = set->family == AF_INET
+ ? &hash_netport4_variant : &hash_netport6_variant;
+ }
+
+ pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+ set->name, jhash_size(h->table->htable_bits),
+ h->table->htable_bits, h->maxelem, set->data, h->table);
+
+ return 0;
+}
+
+static struct ip_set_type hash_netport_type __read_mostly = {
+ .name = "hash:net,port",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
+ .dimension = IPSET_DIM_TWO,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = hash_netport_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_IP] = { .type = NLA_NESTED },
+ [IPSET_ATTR_PORT] = { .type = NLA_U16 },
+ [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 },
+ [IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_netport_init(void)
+{
+ return ip_set_type_register(&hash_netport_type);
+}
+
+static void __exit
+hash_netport_fini(void)
+{
+ ip_set_type_unregister(&hash_netport_type);
+}
+
+module_init(hash_netport_init);
+module_exit(hash_netport_fini);
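
All the port-aware types above share the same protocol rule: only TCP, UDP and ICMP/ICMPv6 carry a meaningful port, and any other protocol is stored with port 0, so it collapses to one element per network and protocol. A compact sketch of that normalization, with all names illustrative:

	#include <stdint.h>
	#include <stdio.h>

	#define P_ICMP	1
	#define P_TCP	6
	#define P_UDP	17

	/* Mirror of the switch in the uadt/kadt paths: protocols
	 * without a port concept keep port 0. */
	static uint16_t normalize_port(uint8_t proto, uint16_t port)
	{
		switch (proto) {
		case P_TCP:
		case P_UDP:
		case P_ICMP:
			return port;
		default:
			return 0;
		}
	}

	int main(void)
	{
		printf("%u\n", normalize_port(P_TCP, 80));	/* 80 */
		printf("%u\n", normalize_port(47, 1234));	/* GRE -> 0 */
		return 0;
	}
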
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
new file mode 100644
index 0000000..a47c329
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -0,0 +1,584 @@
+/* Copyright (C) 2008-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the list:set type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_list.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("list:set type of IP sets");
+MODULE_ALIAS("ip_set_list:set");
+
+/* Member elements without and with timeout */
+struct set_elem {
+ ip_set_id_t id;
+};
+
+struct set_telem {
+ ip_set_id_t id;
+ unsigned long timeout;
+};
+
+/* Type structure */
+struct list_set {
+ size_t dsize; /* element size */
+ u32 size; /* size of set list array */
+ u32 timeout; /* timeout value */
+ struct timer_list gc; /* garbage collection */
+ struct set_elem members[0]; /* the set members */
+};
+
+static inline struct set_elem *
+list_set_elem(const struct list_set *map, u32 id)
+{
+ return (struct set_elem *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+list_set_timeout(const struct list_set *map, u32 id)
+{
+ const struct set_telem *elem =
+ (const struct set_telem *) list_set_elem(map, id);
+
+ return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+list_set_expired(const struct list_set *map, u32 id)
+{
+ const struct set_telem *elem =
+ (const struct set_telem *) list_set_elem(map, id);
+
+ return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+list_set_exist(const struct set_telem *elem)
+{
+ return elem->id != IPSET_INVALID_ID &&
+ !ip_set_timeout_expired(elem->timeout);
+}
+
+/* Set list without and with timeout */
+
+static int
+list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
+ enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+ struct list_set *map = set->data;
+ struct set_elem *elem;
+ u32 i;
+ int ret;
+
+ for (i = 0; i < map->size; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID)
+ return 0;
+ if (with_timeout(map->timeout) && list_set_expired(map, i))
+ continue;
+ switch (adt) {
+ case IPSET_TEST:
+ ret = ip_set_test(elem->id, skb, pf, dim, flags);
+ if (ret > 0)
+ return ret;
+ break;
+ case IPSET_ADD:
+ ret = ip_set_add(elem->id, skb, pf, dim, flags);
+ if (ret == 0)
+ return ret;
+ break;
+ case IPSET_DEL:
+ ret = ip_set_del(elem->id, skb, pf, dim, flags);
+ if (ret == 0)
+ return ret;
+ break;
+ default:
+ break;
+ }
+ }
+ return -EINVAL;
+}
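
list_set_kadt() is where the type earns its name: member sets are consulted strictly in list order, a TEST succeeds on the first member that matches, and an ADD or DEL is applied to the first member that accepts the operation (return value 0), with expired slots skipped and the scan ending at the first empty slot. Reduced to its shape, that is ordered fallback over a list of handlers (a sketch with illustrative names):

    /* Sketch: try handlers in order; the first success wins,
     * mirroring the ADD/DEL arms of the kadt loop above. */
    typedef int (*try_fn)(void *ctx);

    static int try_in_order(try_fn *fns, unsigned int n, void *ctx)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    if (fns[i](ctx) == 0)
                            return 0;       /* handled by member i */
            return -1;                      /* no member took it */
    }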
+
+static bool
+next_id_eq(const struct list_set *map, u32 i, ip_set_id_t id)
+{
+ const struct set_elem *elem;
+
+ if (i + 1 < map->size) {
+ elem = list_set_elem(map, i + 1);
+ return !!(elem->id == id &&
+ !(with_timeout(map->timeout) &&
+ list_set_expired(map, i + 1)));
+ }
+
+ return 0;
+}
+
+static void
+list_elem_add(struct list_set *map, u32 i, ip_set_id_t id)
+{
+ struct set_elem *e;
+
+ for (; i < map->size; i++) {
+ e = list_set_elem(map, i);
+ swap(e->id, id);
+ if (e->id == IPSET_INVALID_ID)
+ break;
+ }
+}
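
list_elem_add() inserts at slot i by rippling: swap() deposits the incoming id and picks up the previous occupant, which the next iteration deposits one slot further, until a slot that already held IPSET_INVALID_ID is reached. This stays correct because empty slots only ever form a packed tail, so once an invalid id is carried forward every later slot is already empty. A userspace sketch of the same ripple (illustrative; INVALID plays the role of IPSET_INVALID_ID):

    #define INVALID 0xffffu

    /* Sketch: swap-based insert-and-shift as in list_elem_add().
     * Slots at and after pos move one step right; the loop stops
     * once the value just written was the empty marker. */
    static void ripple_insert(unsigned short *a, unsigned int size,
                              unsigned int pos, unsigned short id)
    {
            unsigned int i;

            for (i = pos; i < size; i++) {
                    unsigned short evicted = a[i];

                    a[i] = id;
                    if (a[i] == INVALID)
                            break;          /* ran into the empty tail */
                    id = evicted;           /* carry the occupant on */
            }
    }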
+
+static void
+list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
+ unsigned long timeout)
+{
+ struct set_telem *e;
+
+ for (; i < map->size; i++) {
+ e = (struct set_telem *)list_set_elem(map, i);
+ swap(e->id, id);
+ if (e->id == IPSET_INVALID_ID)
+ break;
+ swap(e->timeout, timeout);
+ }
+}
+
+static int
+list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
+ unsigned long timeout)
+{
+ const struct set_elem *e = list_set_elem(map, i);
+
+ if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
+ /* Last element replaced: e.g. add new,before,last */
+ ip_set_put_byindex(e->id);
+ if (with_timeout(map->timeout))
+ list_elem_tadd(map, i, id, timeout);
+ else
+ list_elem_add(map, i, id);
+
+ return 0;
+}
+
+static int
+list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+{
+ struct set_elem *a = list_set_elem(map, i), *b;
+
+ ip_set_put_byindex(id);
+
+ for (; i < map->size - 1; i++) {
+ b = list_set_elem(map, i + 1);
+ a->id = b->id;
+ if (with_timeout(map->timeout))
+ ((struct set_telem *)a)->timeout =
+ ((struct set_telem *)b)->timeout;
+ a = b;
+ if (a->id == IPSET_INVALID_ID)
+ break;
+ }
+ /* Last element */
+ a->id = IPSET_INVALID_ID;
+ return 0;
+}
+
+static int
+list_set_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+ struct list_set *map = set->data;
+ bool with_timeout = with_timeout(map->timeout);
+ int before = 0;
+ u32 timeout = map->timeout;
+ ip_set_id_t id, refid = IPSET_INVALID_ID;
+ const struct set_elem *elem;
+ struct ip_set *s;
+ u32 i;
+ int ret = 0;
+
+ if (unlikely(!tb[IPSET_ATTR_NAME] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+ if (id == IPSET_INVALID_ID)
+ return -IPSET_ERR_NAME;
+ /* "Loop detection" */
+ if (s->type->features & IPSET_TYPE_NAME) {
+ ret = -IPSET_ERR_LOOP;
+ goto finish;
+ }
+
+ if (tb[IPSET_ATTR_CADT_FLAGS]) {
+ u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ before = f & IPSET_FLAG_BEFORE;
+ }
+
+ if (before && !tb[IPSET_ATTR_NAMEREF]) {
+ ret = -IPSET_ERR_BEFORE;
+ goto finish;
+ }
+
+ if (tb[IPSET_ATTR_NAMEREF]) {
+ refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+ &s);
+ if (refid == IPSET_INVALID_ID) {
+ ret = -IPSET_ERR_NAMEREF;
+ goto finish;
+ }
+ if (!before)
+ before = -1;
+ }
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!with_timeout) {
+ ret = -IPSET_ERR_TIMEOUT;
+ goto finish;
+ }
+ timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+ }
+
+ switch (adt) {
+ case IPSET_TEST:
+ for (i = 0; i < map->size && !ret; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID ||
+ (before != 0 && i + 1 >= map->size))
+ break;
+ else if (with_timeout && list_set_expired(map, i))
+ continue;
+ else if (before > 0 && elem->id == id)
+ ret = next_id_eq(map, i, refid);
+ else if (before < 0 && elem->id == refid)
+ ret = next_id_eq(map, i, id);
+ else if (before == 0 && elem->id == id)
+ ret = 1;
+ }
+ break;
+ case IPSET_ADD:
+ for (i = 0; i < map->size && !ret; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == id &&
+ !(with_timeout && list_set_expired(map, i)))
+ ret = -IPSET_ERR_EXIST;
+ }
+ if (ret == -IPSET_ERR_EXIST)
+ break;
+ ret = -IPSET_ERR_LIST_FULL;
+ for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID)
+ ret = before != 0 ? -IPSET_ERR_REF_EXIST
+ : list_set_add(map, i, id, timeout);
+ else if (elem->id != refid)
+ continue;
+ else if (with_timeout && list_set_expired(map, i))
+ ret = -IPSET_ERR_REF_EXIST;
+ else if (before)
+ ret = list_set_add(map, i, id, timeout);
+ else if (i + 1 < map->size)
+ ret = list_set_add(map, i + 1, id, timeout);
+ }
+ break;
+ case IPSET_DEL:
+ ret = -IPSET_ERR_EXIST;
+ for (i = 0; i < map->size && ret == -IPSET_ERR_EXIST; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id == IPSET_INVALID_ID) {
+ ret = before != 0 ? -IPSET_ERR_REF_EXIST
+ : -IPSET_ERR_EXIST;
+ break;
+ } else if (with_timeout && list_set_expired(map, i))
+ continue;
+ else if (elem->id == id &&
+ (before == 0 ||
+ (before > 0 &&
+ next_id_eq(map, i, refid))))
+ ret = list_set_del(map, id, i);
+ else if (before < 0 &&
+ elem->id == refid &&
+ next_id_eq(map, i, id))
+ ret = list_set_del(map, id, i + 1);
+ }
+ break;
+ default:
+ break;
+ }
+
+finish:
+ if (refid != IPSET_INVALID_ID)
+ ip_set_put_byindex(refid);
+ if (adt != IPSET_ADD || ret)
+ ip_set_put_byindex(id);
+
+ return ip_set_eexist(ret, flags) ? 0 : ret;
+}
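
Throughout list_set_uadt() the positioning request is folded into the integer before: 1 means IPSET_FLAG_BEFORE was set together with a reference set ("id directly in front of refid"), -1 means a reference set was named without the flag ("id right after refid"), and 0 means plain, position-less membership. The TEST branch above is exactly that three-way decision; condensed into a sketch (illustrative, with the INVALID marker from the earlier sketch):

    #define INVALID 0xffffu

    /* Sketch: tri-state relative membership test, as in IPSET_TEST.
     * before: 1 = id must directly precede ref, -1 = id must
     * directly follow ref, 0 = id anywhere in the packed prefix. */
    static int rel_test(const unsigned short *a, unsigned int size,
                        unsigned short id, unsigned short ref, int before)
    {
            unsigned int i;

            for (i = 0; i < size && a[i] != INVALID; i++) {
                    if (before > 0 && a[i] == id)
                            return i + 1 < size && a[i + 1] == ref;
                    if (before < 0 && a[i] == ref)
                            return i + 1 < size && a[i + 1] == id;
                    if (before == 0 && a[i] == id)
                            return 1;
            }
            return 0;
    }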
+
+static void
+list_set_flush(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+ struct set_elem *elem;
+ u32 i;
+
+ for (i = 0; i < map->size; i++) {
+ elem = list_set_elem(map, i);
+ if (elem->id != IPSET_INVALID_ID) {
+ ip_set_put_byindex(elem->id);
+ elem->id = IPSET_INVALID_ID;
+ }
+ }
+}
+
+static void
+list_set_destroy(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+
+ if (with_timeout(map->timeout))
+ del_timer_sync(&map->gc);
+ list_set_flush(set);
+ kfree(map);
+
+ set->data = NULL;
+}
+
+static int
+list_set_head(struct ip_set *set, struct sk_buff *skb)
+{
+ const struct list_set *map = set->data;
+ struct nlattr *nested;
+
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested)
+ goto nla_put_failure;
+ NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
+ if (with_timeout(map->timeout))
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+ NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+ htonl(atomic_read(&set->ref) - 1));
+ NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+ htonl(sizeof(*map) + map->size * map->dsize));
+ ipset_nest_end(skb, nested);
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int
+list_set_list(const struct ip_set *set,
+ struct sk_buff *skb, struct netlink_callback *cb)
+{
+ const struct list_set *map = set->data;
+ struct nlattr *atd, *nested;
+ u32 i, first = cb->args[2];
+ const struct set_elem *e;
+
+ atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+ if (!atd)
+ return -EMSGSIZE;
+ for (; cb->args[2] < map->size; cb->args[2]++) {
+ i = cb->args[2];
+ e = list_set_elem(map, i);
+ if (e->id == IPSET_INVALID_ID)
+ goto finish;
+ if (with_timeout(map->timeout) && list_set_expired(map, i))
+ continue;
+ nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+ if (!nested) {
+ if (i == first) {
+ nla_nest_cancel(skb, atd);
+ return -EMSGSIZE;
+ } else
+ goto nla_put_failure;
+ }
+ NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
+ ip_set_name_byindex(e->id));
+ if (with_timeout(map->timeout)) {
+ const struct set_telem *te =
+ (const struct set_telem *) e;
+ NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+ htonl(ip_set_timeout_get(te->timeout)));
+ }
+ ipset_nest_end(skb, nested);
+ }
+finish:
+ ipset_nest_end(skb, atd);
+ /* Set listing finished */
+ cb->args[2] = 0;
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(skb, nested);
+ ipset_nest_end(skb, atd);
+ if (unlikely(i == first)) {
+ cb->args[2] = 0;
+ return -EMSGSIZE;
+ }
+ return 0;
+}
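
list_set_list() follows the standard resumable netlink dump protocol: cb->args[2] is the cursor that survives between skbs, a full skb aborts with -EMSGSIZE only when not even the first requested element fit (otherwise the partial skb is sent and the dump resumes there), and a completed dump rewinds the cursor to 0. The control flow in miniature (a sketch; emit() stands in for the nest/put calls):

    /* Sketch: resumable dump. emit() returns < 0 when the buffer is
     * full; *cursor persists across calls like cb->args[2] above. */
    static int dump_some(unsigned int *cursor, unsigned int total,
                         int (*emit)(unsigned int idx))
    {
            unsigned int first = *cursor;

            for (; *cursor < total; (*cursor)++) {
                    if (emit(*cursor) < 0)
                            /* fail hard only if nothing at all fit */
                            return *cursor == first ? -1 : 0;
            }
            *cursor = 0;            /* finished; rewind for next dump */
            return 0;
    }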
+
+static bool
+list_set_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+ const struct list_set *x = a->data;
+ const struct list_set *y = b->data;
+
+ return x->size == y->size &&
+ x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant list_set = {
+ .kadt = list_set_kadt,
+ .uadt = list_set_uadt,
+ .destroy = list_set_destroy,
+ .flush = list_set_flush,
+ .head = list_set_head,
+ .list = list_set_list,
+ .same_set = list_set_same_set,
+};
+
+static void
+list_set_gc(unsigned long ul_set)
+{
+ struct ip_set *set = (struct ip_set *) ul_set;
+ struct list_set *map = set->data;
+ struct set_telem *e;
+ u32 i;
+
+ /* We run parallel with other readers (test element)
+ * but adding/deleting new entries is locked out */
+ read_lock_bh(&set->lock);
+ /* i is unsigned: count down from map->size to avoid wrapping at zero */
+ for (i = map->size; i > 0; i--) {
+ e = (struct set_telem *) list_set_elem(map, i - 1);
+ if (e->id != IPSET_INVALID_ID &&
+ list_set_expired(map, i - 1))
+ list_set_del(map, e->id, i - 1);
+ }
+ read_unlock_bh(&set->lock);
+
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
+
+static void
+list_set_gc_init(struct ip_set *set)
+{
+ struct list_set *map = set->data;
+
+ init_timer(&map->gc);
+ map->gc.data = (unsigned long) set;
+ map->gc.function = list_set_gc;
+ map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+ add_timer(&map->gc);
+}
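
Garbage collection rides on a self-rearming kernel timer: each expiry does one sweep and then queues itself IPSET_GC_PERIOD(timeout) seconds ahead, and list_set_destroy() must del_timer_sync() before kfree() so a concurrent callback cannot touch freed memory. The rearming idiom on its own, using the timer API of this kernel generation (a sketch with hypothetical names):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct sweeper {                        /* hypothetical context */
            struct timer_list timer;
            unsigned long period;           /* seconds */
    };

    static void sweeper_cb(unsigned long data)
    {
            struct sweeper *s = (struct sweeper *)data;

            /* ... one gc sweep under the appropriate lock ... */
            s->timer.expires = jiffies + s->period * HZ;
            add_timer(&s->timer);           /* rearm */
    }

    static void sweeper_start(struct sweeper *s)
    {
            init_timer(&s->timer);
            s->timer.data = (unsigned long)s;
            s->timer.function = sweeper_cb;
            s->timer.expires = jiffies + s->period * HZ;
            add_timer(&s->timer);
    }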
+
+/* Create list:set type of sets */
+
+static bool
+init_list_set(struct ip_set *set, u32 size, size_t dsize,
+ unsigned long timeout)
+{
+ struct list_set *map;
+ struct set_elem *e;
+ u32 i;
+
+ map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+ if (!map)
+ return false;
+
+ map->size = size;
+ map->dsize = dsize;
+ map->timeout = timeout;
+ set->data = map;
+
+ for (i = 0; i < size; i++) {
+ e = list_set_elem(map, i);
+ e->id = IPSET_INVALID_ID;
+ }
+
+ return true;
+}
+
+static int
+list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+ u32 size = IP_SET_LIST_DEFAULT_SIZE;
+
+ if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_SIZE])
+ size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
+ if (size < IP_SET_LIST_MIN_SIZE)
+ size = IP_SET_LIST_MIN_SIZE;
+
+ if (tb[IPSET_ATTR_TIMEOUT]) {
+ if (!init_list_set(set, size, sizeof(struct set_telem),
+ ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT])))
+ return -ENOMEM;
+
+ list_set_gc_init(set);
+ } else {
+ if (!init_list_set(set, size, sizeof(struct set_elem),
+ IPSET_NO_TIMEOUT))
+ return -ENOMEM;
+ }
+ set->variant = &list_set;
+ return 0;
+}
+
+static struct ip_set_type list_set_type __read_mostly = {
+ .name = "list:set",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
+ .dimension = IPSET_DIM_ONE,
+ .family = AF_UNSPEC,
+ .revision = 0,
+ .create = list_set_create,
+ .create_policy = {
+ [IPSET_ATTR_SIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_NAME] = { .type = NLA_STRING,
+ .len = IPSET_MAXNAMELEN },
+ [IPSET_ATTR_NAMEREF] = { .type = NLA_STRING,
+ .len = IPSET_MAXNAMELEN },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+list_set_init(void)
+{
+ return ip_set_type_register(&list_set_type);
+}
+
+static void __exit
+list_set_fini(void)
+{
+ ip_set_type_unregister(&list_set_type);
+}
+
+module_init(list_set_init);
+module_exit(list_set_fini);
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
new file mode 100644
index 0000000..23f8c81
--- /dev/null
+++ b/net/netfilter/ipset/pfxlen.c
@@ -0,0 +1,291 @@
+#include <linux/netfilter/ipset/pfxlen.h>
+
+/*
+ * Prefixlen maps for fast conversions, by Jan Engelhardt.
+ */
+
+#define E(a, b, c, d) \
+ {.ip6 = { \
+ __constant_htonl(a), __constant_htonl(b), \
+ __constant_htonl(c), __constant_htonl(d), \
+ } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_netmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_netmask_map[] = {
+ E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_netmask_map);
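
The table spends about 2 KB of read-only data to turn mask lookup into a single indexed load per packet. For IPv4 the same mask could be computed in place; a hedged equivalent of one table entry (the helper name is illustrative):

    /* Sketch: closed-form IPv4 mask for prefix 0..32, equal to
     * ip_set_netmask_map[pfx].ip. pfx == 0 must be special-cased:
     * shifting a 32-bit value by 32 is undefined behaviour in C. */
    static inline __be32 cidr_to_mask4(unsigned int pfx)
    {
            return pfx ? htonl(~0u << (32 - pfx)) : 0;
    }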
+
+#undef E
+#define E(a, b, c, d) \
+ {.ip6 = { (__force __be32) a, (__force __be32) b, \
+ (__force __be32) c, (__force __be32) d, \
+ } }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_hostmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_hostmask_map[] = {
+ E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
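
Although the two tables list identical hex constants, they are not the same data: the first E macro runs each word through __constant_htonl(), while the redefined E stores the raw value force-cast to __be32 with no byte swap, so on little-endian machines the stored bytes differ between ip_set_netmask_map and ip_set_hostmask_map. A tiny check makes that visible (a sketch; the endianness point is what matters, not the helper):

    /* Sketch: htonl() versus a raw __be32 cast of the same constant. */
    static bool stored_bytes_differ(void)
    {
            __be32 swapped = htonl(0xFFFF0000);
            __be32 raw = (__force __be32) 0xFFFF0000;

            return swapped != raw;  /* true on little-endian hosts */
    }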
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index a475ede..5c48ffb 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -43,11 +43,6 @@ EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);
-/* ipvs application list head */
-static LIST_HEAD(ip_vs_app_list);
-static DEFINE_MUTEX(__ip_vs_app_mutex);
-
-
/*
* Get an ip_vs_app object
*/
@@ -67,7 +62,8 @@ static inline void ip_vs_app_put(struct ip_vs_app *app)
* Allocate/initialize app incarnation and register it in proto apps.
*/
static int
-ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
+ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
+ __u16 port)
{
struct ip_vs_protocol *pp;
struct ip_vs_app *inc;
@@ -98,7 +94,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
}
}
- ret = pp->register_app(inc);
+ ret = pp->register_app(net, inc);
if (ret)
goto out;
@@ -119,7 +115,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
* Release app incarnation
*/
static void
-ip_vs_app_inc_release(struct ip_vs_app *inc)
+ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_protocol *pp;
@@ -127,7 +123,7 @@ ip_vs_app_inc_release(struct ip_vs_app *inc)
return;
if (pp->unregister_app)
- pp->unregister_app(inc);
+ pp->unregister_app(net, inc);
IP_VS_DBG(9, "%s App %s:%u unregistered\n",
pp->name, inc->name, ntohs(inc->port));
@@ -168,15 +164,17 @@ void ip_vs_app_inc_put(struct ip_vs_app *inc)
* Register an application incarnation in protocol applications
*/
int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
+register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+ __u16 port)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
int result;
- mutex_lock(&__ip_vs_app_mutex);
+ mutex_lock(&ipvs->app_mutex);
- result = ip_vs_app_inc_new(app, proto, port);
+ result = ip_vs_app_inc_new(net, app, proto, port);
- mutex_unlock(&__ip_vs_app_mutex);
+ mutex_unlock(&ipvs->app_mutex);
return result;
}
@@ -185,16 +183,17 @@ register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
/*
* ip_vs_app registration routine
*/
-int register_ip_vs_app(struct ip_vs_app *app)
+int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
/* increase the module use count */
ip_vs_use_count_inc();
- mutex_lock(&__ip_vs_app_mutex);
+ mutex_lock(&ipvs->app_mutex);
- list_add(&app->a_list, &ip_vs_app_list);
+ list_add(&app->a_list, &ipvs->app_list);
- mutex_unlock(&__ip_vs_app_mutex);
+ mutex_unlock(&ipvs->app_mutex);
return 0;
}
@@ -204,19 +203,20 @@ int register_ip_vs_app(struct ip_vs_app *app)
* ip_vs_app unregistration routine
* We are sure there are no app incarnations attached to services
*/
-void unregister_ip_vs_app(struct ip_vs_app *app)
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_app *inc, *nxt;
- mutex_lock(&__ip_vs_app_mutex);
+ mutex_lock(&ipvs->app_mutex);
list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
- ip_vs_app_inc_release(inc);
+ ip_vs_app_inc_release(net, inc);
}
list_del(&app->a_list);
- mutex_unlock(&__ip_vs_app_mutex);
+ mutex_unlock(&ipvs->app_mutex);
/* decrease the module use count */
ip_vs_use_count_dec();
@@ -226,7 +226,8 @@ void unregister_ip_vs_app(struct ip_vs_app *app)
/*
* Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
*/
-int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
+int ip_vs_bind_app(struct ip_vs_conn *cp,
+ struct ip_vs_protocol *pp)
{
return pp->app_conn_bind(cp);
}
@@ -481,11 +482,11 @@ int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
* /proc/net/ip_vs_app entry function
*/
-static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
+static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
{
struct ip_vs_app *app, *inc;
- list_for_each_entry(app, &ip_vs_app_list, a_list) {
+ list_for_each_entry(app, &ipvs->app_list, a_list) {
list_for_each_entry(inc, &app->incs_list, a_list) {
if (pos-- == 0)
return inc;
@@ -497,19 +498,24 @@ static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
- mutex_lock(&__ip_vs_app_mutex);
+ struct net *net = seq_file_net(seq);
+ struct netns_ipvs *ipvs = net_ipvs(net);
- return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
+ mutex_lock(&ipvs->app_mutex);
+
+ return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_app *inc, *app;
struct list_head *e;
+ struct net *net = seq_file_net(seq);
+ struct netns_ipvs *ipvs = net_ipvs(net);
++*pos;
if (v == SEQ_START_TOKEN)
- return ip_vs_app_idx(0);
+ return ip_vs_app_idx(ipvs, 0);
inc = v;
app = inc->app;
@@ -518,7 +524,7 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return list_entry(e, struct ip_vs_app, a_list);
/* go on to next application */
- for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
+ for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
app = list_entry(e, struct ip_vs_app, a_list);
list_for_each_entry(inc, &app->incs_list, a_list) {
return inc;
@@ -529,7 +535,9 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
- mutex_unlock(&__ip_vs_app_mutex);
+ struct netns_ipvs *ipvs = net_ipvs(seq_file_net(seq));
+
+ mutex_unlock(&ipvs->app_mutex);
}
static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
@@ -557,7 +565,8 @@ static const struct seq_operations ip_vs_app_seq_ops = {
static int ip_vs_app_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ip_vs_app_seq_ops);
+ return seq_open_net(inode, file, &ip_vs_app_seq_ops,
+ sizeof(struct seq_net_private));
}
static const struct file_operations ip_vs_app_fops = {
@@ -569,15 +578,36 @@ static const struct file_operations ip_vs_app_fops = {
};
#endif
-int __init ip_vs_app_init(void)
+static int __net_init __ip_vs_app_init(struct net *net)
{
- /* we will replace it with proc_net_ipvs_create() soon */
- proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ INIT_LIST_HEAD(&ipvs->app_list);
+ __mutex_init(&ipvs->app_mutex, "ipvs->app_mutex", &ipvs->app_key);
+ proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops);
return 0;
}
+static void __net_exit __ip_vs_app_cleanup(struct net *net)
+{
+ proc_net_remove(net, "ip_vs_app");
+}
+
+static struct pernet_operations ip_vs_app_ops = {
+ .init = __ip_vs_app_init,
+ .exit = __ip_vs_app_cleanup,
+};
+
+int __init ip_vs_app_init(void)
+{
+ int rv;
+
+ rv = register_pernet_subsys(&ip_vs_app_ops);
+ return rv;
+}
+
void ip_vs_app_cleanup(void)
{
- proc_net_remove(&init_net, "ip_vs_app");
+ unregister_pernet_subsys(&ip_vs_app_ops);
}
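
This file is the template for the rest of the netns conversion: the global app list, its mutex and the /proc entry all move into struct netns_ipvs, and register_pernet_subsys() arranges for the __net_init/__net_exit pair to run once per network namespace (including init_net at registration time). The skeleton of the pattern, stripped of the ipvs specifics (illustrative names):

    #include <net/net_namespace.h>

    /* Sketch: minimal pernet subsystem. .init runs for each netns as
     * it appears, .exit as each one is dismantled. */
    static int __net_init example_net_init(struct net *net)
    {
            /* set up this namespace's private state */
            return 0;
    }

    static void __net_exit example_net_exit(struct net *net)
    {
            /* tear down this namespace's private state */
    }

    static struct pernet_operations example_net_ops = {
            .init = example_net_init,
            .exit = example_net_exit,
    };

    /* module init: register_pernet_subsys(&example_net_ops);
     * module exit: unregister_pernet_subsys(&example_net_ops); */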
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index e9adecd..83233fe 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -48,35 +48,32 @@
/*
* Connection hash size. Default is what was selected at compile time.
*/
-int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
/* size and mask values */
-int ip_vs_conn_tab_size;
-int ip_vs_conn_tab_mask;
+int ip_vs_conn_tab_size __read_mostly;
+static int ip_vs_conn_tab_mask __read_mostly;
/*
* Connection hash table: for input and output packets lookups of IPVS
*/
-static struct list_head *ip_vs_conn_tab;
+static struct list_head *ip_vs_conn_tab __read_mostly;
/* SLAB cache for IPVS connections */
static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
-/* counter for current IPVS connections */
-static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
-
/* counter for no client port connections */
static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
/* random value for IPVS connection hash */
-static unsigned int ip_vs_conn_rnd;
+static unsigned int ip_vs_conn_rnd __read_mostly;
/*
* Fine locking granularity for big connection hash table
*/
-#define CT_LOCKARRAY_BITS 4
+#define CT_LOCKARRAY_BITS 5
#define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
@@ -133,19 +130,19 @@ static inline void ct_write_unlock_bh(unsigned key)
/*
* Returns hash value for IPVS connection entry
*/
-static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
+static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
const union nf_inet_addr *addr,
__be16 port)
{
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
- return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
- (__force u32)port, proto, ip_vs_conn_rnd)
- & ip_vs_conn_tab_mask;
+ return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
+ (__force u32)port, proto, ip_vs_conn_rnd) ^
+ ((size_t)net>>8)) & ip_vs_conn_tab_mask;
#endif
- return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
- ip_vs_conn_rnd)
- & ip_vs_conn_tab_mask;
+ return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
+ ip_vs_conn_rnd) ^
+ ((size_t)net>>8)) & ip_vs_conn_tab_mask;
}
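
XOR-ing (size_t)net >> 8 into the hash key keeps connections from different namespaces in different buckets even when their address/port/protocol tuples collide; the shift by 8 drops the low pointer bits, which are largely constant for slab-allocated struct net objects and would add no entropy. The mixing step in isolation (a sketch):

    /* Sketch: fold a shifted namespace pointer into a jhash-derived
     * bucket index, as the reworked ip_vs_conn_hashkey() does. */
    static inline unsigned int
    bucket_for(u32 addr, u32 portproto, u32 rnd,
               const void *ns, unsigned int mask)
    {
            return (jhash_2words(addr, portproto, rnd) ^
                    ((size_t)ns >> 8)) & mask;
    }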
static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
@@ -166,18 +163,18 @@ static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
port = p->vport;
}
- return ip_vs_conn_hashkey(p->af, p->protocol, addr, port);
+ return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
}
static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(cp->af, cp->protocol, &cp->caddr, cp->cport,
- NULL, 0, &p);
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
+ &cp->caddr, cp->cport, NULL, 0, &p);
- if (cp->dest && cp->dest->svc->pe) {
- p.pe = cp->dest->svc->pe;
+ if (cp->pe) {
+ p.pe = cp->pe;
p.pe_data = cp->pe_data;
p.pe_data_len = cp->pe_data_len;
}
@@ -186,7 +183,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
}
/*
- * Hashes ip_vs_conn in ip_vs_conn_tab by proto,addr,port.
+ * Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
* returns bool success.
*/
static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
@@ -269,11 +266,12 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
+ p->cport == cp->cport && p->vport == cp->vport &&
ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
- p->cport == cp->cport && p->vport == cp->vport &&
((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
- p->protocol == cp->protocol) {
+ p->protocol == cp->protocol &&
+ ip_vs_conn_net_eq(cp, p->net)) {
/* HIT */
atomic_inc(&cp->refcnt);
ct_read_unlock(hash);
@@ -313,23 +311,23 @@ ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb,
struct ip_vs_conn_param *p)
{
__be16 _ports[2], *pptr;
+ struct net *net = skb_net(skb);
pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
if (pptr == NULL)
return 1;
if (likely(!inverse))
- ip_vs_conn_fill_param(af, iph->protocol, &iph->saddr, pptr[0],
- &iph->daddr, pptr[1], p);
+ ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
+ pptr[0], &iph->daddr, pptr[1], p);
else
- ip_vs_conn_fill_param(af, iph->protocol, &iph->daddr, pptr[1],
- &iph->saddr, pptr[0], p);
+ ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
+ pptr[1], &iph->saddr, pptr[0], p);
return 0;
}
struct ip_vs_conn *
ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
@@ -353,8 +351,10 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
ct_read_lock(hash);
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+ if (!ip_vs_conn_net_eq(cp, p->net))
+ continue;
if (p->pe_data && p->pe->ct_match) {
- if (p->pe->ct_match(p, cp))
+ if (p->pe == cp->pe && p->pe->ct_match(p, cp))
goto out;
continue;
}
@@ -404,10 +404,11 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
if (cp->af == p->af &&
+ p->vport == cp->cport && p->cport == cp->dport &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
- p->vport == cp->cport && p->cport == cp->dport &&
- p->protocol == cp->protocol) {
+ p->protocol == cp->protocol &&
+ ip_vs_conn_net_eq(cp, p->net)) {
/* HIT */
atomic_inc(&cp->refcnt);
ret = cp;
@@ -428,7 +429,6 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
struct ip_vs_conn *
ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off, int inverse)
{
@@ -611,9 +611,9 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
struct ip_vs_dest *dest;
if ((cp) && (!cp->dest)) {
- dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
- &cp->vaddr, cp->vport,
- cp->protocol);
+ dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
+ cp->dport, &cp->vaddr, cp->vport,
+ cp->protocol, cp->fwmark);
ip_vs_bind_dest(cp, dest);
return dest;
} else
@@ -686,13 +686,14 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
int ip_vs_check_template(struct ip_vs_conn *ct)
{
struct ip_vs_dest *dest = ct->dest;
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
/*
* Checking the dest server status.
*/
if ((dest == NULL) ||
!(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
- (sysctl_ip_vs_expire_quiescent_template &&
+ (ipvs->sysctl_expire_quiescent_template &&
(atomic_read(&dest->weight) == 0))) {
IP_VS_DBG_BUF(9, "check_template: dest not available for "
"protocol %s s:%s:%d v:%s:%d "
@@ -730,6 +731,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
static void ip_vs_conn_expire(unsigned long data)
{
struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
cp->timeout = 60*HZ;
@@ -765,13 +767,14 @@ static void ip_vs_conn_expire(unsigned long data)
if (cp->flags & IP_VS_CONN_F_NFCT)
ip_vs_conn_drop_conntrack(cp);
+ ip_vs_pe_put(cp->pe);
kfree(cp->pe_data);
if (unlikely(cp->app != NULL))
ip_vs_unbind_app(cp);
ip_vs_unbind_dest(cp);
if (cp->flags & IP_VS_CONN_F_NO_CPORT)
atomic_dec(&ip_vs_conn_no_cport_cnt);
- atomic_dec(&ip_vs_conn_count);
+ atomic_dec(&ipvs->conn_count);
kmem_cache_free(ip_vs_conn_cachep, cp);
return;
@@ -802,10 +805,12 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
struct ip_vs_conn *
ip_vs_conn_new(const struct ip_vs_conn_param *p,
const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
- struct ip_vs_dest *dest)
+ struct ip_vs_dest *dest, __u32 fwmark)
{
struct ip_vs_conn *cp;
- struct ip_vs_protocol *pp = ip_vs_proto_get(p->protocol);
+ struct netns_ipvs *ipvs = net_ipvs(p->net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
+ p->protocol);
cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
if (cp == NULL) {
@@ -815,6 +820,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
INIT_LIST_HEAD(&cp->c_list);
setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
+ ip_vs_conn_net_set(cp, p->net);
cp->af = p->af;
cp->protocol = p->protocol;
ip_vs_addr_copy(p->af, &cp->caddr, p->caddr);
@@ -826,7 +832,10 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
&cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
- if (flags & IP_VS_CONN_F_TEMPLATE && p->pe_data) {
+ cp->fwmark = fwmark;
+ if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
+ ip_vs_pe_get(p->pe);
+ cp->pe = p->pe;
cp->pe_data = p->pe_data;
cp->pe_data_len = p->pe_data_len;
}
@@ -842,7 +851,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
atomic_set(&cp->n_control, 0);
atomic_set(&cp->in_pkts, 0);
- atomic_inc(&ip_vs_conn_count);
+ atomic_inc(&ipvs->conn_count);
if (flags & IP_VS_CONN_F_NO_CPORT)
atomic_inc(&ip_vs_conn_no_cport_cnt);
@@ -861,8 +870,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
#endif
ip_vs_bind_xmit(cp);
- if (unlikely(pp && atomic_read(&pp->appcnt)))
- ip_vs_bind_app(cp, pp);
+ if (unlikely(pd && atomic_read(&pd->appcnt)))
+ ip_vs_bind_app(cp, pd->pp);
/*
* Allow conntrack to be preserved. By default, conntrack
@@ -871,7 +880,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
* IP_VS_CONN_F_ONE_PACKET too.
*/
- if (ip_vs_conntrack_enabled())
+ if (ip_vs_conntrack_enabled(ipvs))
cp->flags |= IP_VS_CONN_F_NFCT;
/* Hash it in the ip_vs_conn_tab finally */
@@ -884,17 +893,22 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
* /proc/net/ip_vs_conn entries
*/
#ifdef CONFIG_PROC_FS
+struct ip_vs_iter_state {
+ struct seq_net_private p;
+ struct list_head *l;
+};
static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
{
int idx;
struct ip_vs_conn *cp;
+ struct ip_vs_iter_state *iter = seq->private;
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
ct_read_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
if (pos-- == 0) {
- seq->private = &ip_vs_conn_tab[idx];
+ iter->l = &ip_vs_conn_tab[idx];
return cp;
}
}
@@ -906,14 +920,17 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
{
- seq->private = NULL;
+ struct ip_vs_iter_state *iter = seq->private;
+
+ iter->l = NULL;
return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN;
}
static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_vs_conn *cp = v;
- struct list_head *e, *l = seq->private;
+ struct ip_vs_iter_state *iter = seq->private;
+ struct list_head *e, *l = iter->l;
int idx;
++*pos;
@@ -930,18 +947,19 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
while (++idx < ip_vs_conn_tab_size) {
ct_read_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
- seq->private = &ip_vs_conn_tab[idx];
+ iter->l = &ip_vs_conn_tab[idx];
return cp;
}
ct_read_unlock_bh(idx);
}
- seq->private = NULL;
+ iter->l = NULL;
return NULL;
}
static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
{
- struct list_head *l = seq->private;
+ struct ip_vs_iter_state *iter = seq->private;
+ struct list_head *l = iter->l;
if (l)
ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -955,18 +973,19 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Expires PEName PEData\n");
else {
const struct ip_vs_conn *cp = v;
+ struct net *net = seq_file_net(seq);
char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
size_t len = 0;
- if (cp->dest && cp->pe_data &&
- cp->dest->svc->pe->show_pe_data) {
+ if (!ip_vs_conn_net_eq(cp, net))
+ return 0;
+ if (cp->pe_data) {
pe_data[0] = ' ';
- len = strlen(cp->dest->svc->pe->name);
- memcpy(pe_data + 1, cp->dest->svc->pe->name, len);
+ len = strlen(cp->pe->name);
+ memcpy(pe_data + 1, cp->pe->name, len);
pe_data[len + 1] = ' ';
len += 2;
- len += cp->dest->svc->pe->show_pe_data(cp,
- pe_data + len);
+ len += cp->pe->show_pe_data(cp, pe_data + len);
}
pe_data[len] = '\0';
@@ -1004,7 +1023,8 @@ static const struct seq_operations ip_vs_conn_seq_ops = {
static int ip_vs_conn_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ip_vs_conn_seq_ops);
+ return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
+ sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_fops = {
@@ -1031,6 +1051,10 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
"Pro FromIP FPrt ToIP TPrt DestIP DPrt State Origin Expires\n");
else {
const struct ip_vs_conn *cp = v;
+ struct net *net = seq_file_net(seq);
+
+ if (!ip_vs_conn_net_eq(cp, net))
+ return 0;
#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
@@ -1067,7 +1091,8 @@ static const struct seq_operations ip_vs_conn_sync_seq_ops = {
static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &ip_vs_conn_sync_seq_ops);
+ return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
+ sizeof(struct ip_vs_iter_state));
}
static const struct file_operations ip_vs_conn_sync_fops = {
@@ -1113,7 +1138,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
}
/* Called from keventd and must protect itself from softirqs */
-void ip_vs_random_dropentry(void)
+void ip_vs_random_dropentry(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
@@ -1133,7 +1158,8 @@ void ip_vs_random_dropentry(void)
if (cp->flags & IP_VS_CONN_F_TEMPLATE)
/* connection template */
continue;
-
+ if (!ip_vs_conn_net_eq(cp, net))
+ continue;
if (cp->protocol == IPPROTO_TCP) {
switch(cp->state) {
case IP_VS_TCP_S_SYN_RECV:
@@ -1168,12 +1194,13 @@ void ip_vs_random_dropentry(void)
/*
* Flush all the connection entries in the ip_vs_conn_tab
*/
-static void ip_vs_conn_flush(void)
+static void ip_vs_conn_flush(struct net *net)
{
int idx;
struct ip_vs_conn *cp;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- flush_again:
+flush_again:
for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
/*
* Lock is actually needed in this loop.
@@ -1181,7 +1208,8 @@ static void ip_vs_conn_flush(void)
ct_write_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-
+ if (!ip_vs_conn_net_eq(cp, net))
+ continue;
IP_VS_DBG(4, "del connection\n");
ip_vs_conn_expire_now(cp);
if (cp->control) {
@@ -1194,16 +1222,41 @@ static void ip_vs_conn_flush(void)
/* the counter may not be zero, because some conn entries may still be
 run by the slow timer handler or be unhashed but still referred */
- if (atomic_read(&ip_vs_conn_count) != 0) {
+ if (atomic_read(&ipvs->conn_count) != 0) {
schedule();
goto flush_again;
}
}
+/*
+ * per netns init and exit
+ */
+int __net_init __ip_vs_conn_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ atomic_set(&ipvs->conn_count, 0);
+
+ proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops);
+ proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+ return 0;
+}
+static void __net_exit __ip_vs_conn_cleanup(struct net *net)
+{
+ /* flush all the connection entries first */
+ ip_vs_conn_flush(net);
+ proc_net_remove(net, "ip_vs_conn");
+ proc_net_remove(net, "ip_vs_conn_sync");
+}
+static struct pernet_operations ipvs_conn_ops = {
+ .init = __ip_vs_conn_init,
+ .exit = __ip_vs_conn_cleanup,
+};
int __init ip_vs_conn_init(void)
{
int idx;
+ int retc;
/* Compute size and mask */
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
@@ -1241,24 +1294,18 @@ int __init ip_vs_conn_init(void)
rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
}
- proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops);
- proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+ retc = register_pernet_subsys(&ipvs_conn_ops);
/* calculate the random value for connection hash */
get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
- return 0;
+ return retc;
}
-
void ip_vs_conn_cleanup(void)
{
- /* flush all the connection entries first */
- ip_vs_conn_flush();
-
+ unregister_pernet_subsys(&ipvs_conn_ops);
/* Release the empty cache */
kmem_cache_destroy(ip_vs_conn_cachep);
- proc_net_remove(&init_net, "ip_vs_conn");
- proc_net_remove(&init_net, "ip_vs_conn_sync");
vfree(ip_vs_conn_tab);
}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b4e51e9..4d06617 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -41,6 +41,7 @@
#include <net/icmp.h> /* for icmp_send */
#include <net/route.h>
#include <net/ip6_checksum.h>
+#include <net/netns/generic.h> /* net_generic() */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
@@ -68,6 +69,12 @@ EXPORT_SYMBOL(ip_vs_conn_put);
EXPORT_SYMBOL(ip_vs_get_debug_level);
#endif
+int ip_vs_net_id __read_mostly;
+#ifdef IP_VS_GENERIC_NETNS
+EXPORT_SYMBOL(ip_vs_net_id);
+#endif
+/* netns cnt used for uniqueness */
+static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
/* ID used in ICMP lookups */
#define icmp_id(icmph) (((icmph)->un).echo.id)
@@ -108,21 +115,28 @@ static inline void
ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
{
struct ip_vs_dest *dest = cp->dest;
+ struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
- spin_lock(&dest->stats.lock);
- dest->stats.ustats.inpkts++;
- dest->stats.ustats.inbytes += skb->len;
- spin_unlock(&dest->stats.lock);
-
- spin_lock(&dest->svc->stats.lock);
- dest->svc->stats.ustats.inpkts++;
- dest->svc->stats.ustats.inbytes += skb->len;
- spin_unlock(&dest->svc->stats.lock);
-
- spin_lock(&ip_vs_stats.lock);
- ip_vs_stats.ustats.inpkts++;
- ip_vs_stats.ustats.inbytes += skb->len;
- spin_unlock(&ip_vs_stats.lock);
+ struct ip_vs_cpu_stats *s;
+
+ s = this_cpu_ptr(dest->stats.cpustats);
+ s->ustats.inpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.inbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(dest->svc->stats.cpustats);
+ s->ustats.inpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.inbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(ipvs->cpustats);
+ s->ustats.inpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.inbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
}
}
@@ -131,21 +145,28 @@ static inline void
ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
{
struct ip_vs_dest *dest = cp->dest;
+ struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
- spin_lock(&dest->stats.lock);
- dest->stats.ustats.outpkts++;
- dest->stats.ustats.outbytes += skb->len;
- spin_unlock(&dest->stats.lock);
-
- spin_lock(&dest->svc->stats.lock);
- dest->svc->stats.ustats.outpkts++;
- dest->svc->stats.ustats.outbytes += skb->len;
- spin_unlock(&dest->svc->stats.lock);
-
- spin_lock(&ip_vs_stats.lock);
- ip_vs_stats.ustats.outpkts++;
- ip_vs_stats.ustats.outbytes += skb->len;
- spin_unlock(&ip_vs_stats.lock);
+ struct ip_vs_cpu_stats *s;
+
+ s = this_cpu_ptr(dest->stats.cpustats);
+ s->ustats.outpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.outbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(dest->svc->stats.cpustats);
+ s->ustats.outpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.outbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
+
+ s = this_cpu_ptr(ipvs->cpustats);
+ s->ustats.outpkts++;
+ u64_stats_update_begin(&s->syncp);
+ s->ustats.outbytes += skb->len;
+ u64_stats_update_end(&s->syncp);
}
}
@@ -153,41 +174,44 @@ ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
static inline void
ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
{
- spin_lock(&cp->dest->stats.lock);
- cp->dest->stats.ustats.conns++;
- spin_unlock(&cp->dest->stats.lock);
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
+ struct ip_vs_cpu_stats *s;
+
+ s = this_cpu_ptr(cp->dest->stats.cpustats);
+ s->ustats.conns++;
- spin_lock(&svc->stats.lock);
- svc->stats.ustats.conns++;
- spin_unlock(&svc->stats.lock);
+ s = this_cpu_ptr(svc->stats.cpustats);
+ s->ustats.conns++;
- spin_lock(&ip_vs_stats.lock);
- ip_vs_stats.ustats.conns++;
- spin_unlock(&ip_vs_stats.lock);
+ s = this_cpu_ptr(ipvs->cpustats);
+ s->ustats.conns++;
}
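
These three hunks replace per-packet spinlocks with per-cpu counters: each CPU owns its own ip_vs_cpu_stats, the 64-bit byte counters are bracketed by u64_stats_update_begin/end so 32-bit readers can detect torn values, and the plain packet/connection counts are bumped outside the sequence since a single increment needs no such protection. The reader half of the scheme is not in this hunk; it would sum per-cpu snapshots under the retry loop, roughly like this (a sketch; field names follow the patch):

    /* Sketch: sum a u64_stats-protected per-cpu counter, retrying a
     * CPU whose writer was mid-update. */
    static u64 sum_inbytes(struct ip_vs_stats *stats)
    {
            u64 total = 0;
            int cpu;

            for_each_possible_cpu(cpu) {
                    struct ip_vs_cpu_stats *s =
                            per_cpu_ptr(stats->cpustats, cpu);
                    unsigned int start;
                    u64 v;

                    do {
                            start = u64_stats_fetch_begin(&s->syncp);
                            v = s->ustats.inbytes;
                    } while (u64_stats_fetch_retry(&s->syncp, start));
                    total += v;
            }
            return total;
    }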
static inline int
ip_vs_set_state(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
- if (unlikely(!pp->state_transition))
+ if (unlikely(!pd->pp->state_transition))
return 0;
- return pp->state_transition(cp, direction, skb, pp);
+ return pd->pp->state_transition(cp, direction, skb, pd);
}
-static inline void
+static inline int
ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
struct sk_buff *skb, int protocol,
const union nf_inet_addr *caddr, __be16 cport,
const union nf_inet_addr *vaddr, __be16 vport,
struct ip_vs_conn_param *p)
{
- ip_vs_conn_fill_param(svc->af, protocol, caddr, cport, vaddr, vport, p);
+ ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
+ vport, p);
p->pe = svc->pe;
if (p->pe && p->pe->fill_param)
- p->pe->fill_param(p, skb);
+ return p->pe->fill_param(p, skb);
+
+ return 0;
}
/*
@@ -200,7 +224,7 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
static struct ip_vs_conn *
ip_vs_sched_persist(struct ip_vs_service *svc,
struct sk_buff *skb,
- __be16 ports[2])
+ __be16 src_port, __be16 dst_port, int *ignored)
{
struct ip_vs_conn *cp = NULL;
struct ip_vs_iphdr iph;
@@ -224,8 +248,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
"mnet %s\n",
- IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
- IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
+ IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
+ IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
IP_VS_DBG_ADDR(svc->af, &snet));
/*
@@ -247,14 +271,14 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
__be16 vport = 0;
- if (ports[1] == svc->port) {
+ if (dst_port == svc->port) {
/* non-FTP template:
* <protocol, caddr, 0, vaddr, vport, daddr, dport>
* FTP template:
* <protocol, caddr, 0, vaddr, 0, daddr, 0>
*/
if (svc->port != FTPPORT)
- vport = ports[1];
+ vport = dst_port;
} else {
/* Note: persistent fwmark-based services and
* persistent port zero service are handled here.
@@ -268,24 +292,31 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
vaddr = &fwmark;
}
}
- ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
- vaddr, vport, &param);
+ /* return *ignored = -1 so NF_DROP can be used */
+ if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
+ vaddr, vport, &param) < 0) {
+ *ignored = -1;
+ return NULL;
+ }
}
/* Check if a template already exists */
ct = ip_vs_ct_in_get(&param);
if (!ct || !ip_vs_check_template(ct)) {
- /* No template found or the dest of the connection
+ /*
+ * No template found or the dest of the connection
* template is not available.
+ * return *ignored=0 i.e. ICMP and NF_DROP
*/
dest = svc->scheduler->schedule(svc, skb);
if (!dest) {
IP_VS_DBG(1, "p-schedule: no dest found.\n");
kfree(param.pe_data);
+ *ignored = 0;
return NULL;
}
- if (ports[1] == svc->port && svc->port != FTPPORT)
+ if (dst_port == svc->port && svc->port != FTPPORT)
dport = dest->port;
/* Create a template
@@ -293,9 +324,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* and thus param.pe_data will be destroyed
* when the template expires */
ct = ip_vs_conn_new(&param, &dest->addr, dport,
- IP_VS_CONN_F_TEMPLATE, dest);
+ IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
if (ct == NULL) {
kfree(param.pe_data);
+ *ignored = -1;
return NULL;
}
@@ -306,7 +338,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
kfree(param.pe_data);
}
- dport = ports[1];
+ dport = dst_port;
if (dport == svc->port && dest->port)
dport = dest->port;
@@ -317,11 +349,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
/*
* Create a new connection according to the template
*/
- ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, ports[0],
- &iph.daddr, ports[1], &param);
- cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest);
+ ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
+ src_port, &iph.daddr, dst_port, &param);
+
+ cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
if (cp == NULL) {
ip_vs_conn_put(ct);
+ *ignored = -1;
return NULL;
}
@@ -341,11 +375,27 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* It selects a server according to the virtual service, and
* creates a connection entry.
* Protocols supported: TCP, UDP
+ *
+ * Usage of *ignored
+ *
+ * 1 : protocol tried to schedule (e.g. on SYN), found svc but the
+ * svc/scheduler decides that this packet should be accepted with
+ * NF_ACCEPT because it must not be scheduled.
+ *
+ * 0 : scheduler cannot find a destination, so try bypass or
+ * return ICMP and then NF_DROP (ip_vs_leave).
+ *
+ * -1 : scheduler tried to schedule but a fatal error occurred, e.g.
+ * ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
+ * failure such as a missing Call-ID, ENOMEM on skb_linearize
+ * or pe_data. In this case we should return NF_DROP without
+ * any attempt to send ICMP via ip_vs_leave.
*/
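/*
 * Illustrative sketch, not part of the patch: how a caller of
 * ip_vs_schedule() can map the *ignored contract above to a netfilter
 * verdict. example_verdict() is hypothetical; the real decisions are
 * made in the per-protocol conn_schedule handlers.
 */
static int example_verdict(struct ip_vs_conn *cp, int ignored)
{
	if (cp)
		return NF_ACCEPT;	/* scheduled successfully */
	if (ignored == 1)
		return NF_ACCEPT;	/* svc says: must not be scheduled */
	if (ignored == -1)
		return NF_DROP;		/* fatal error, no ICMP attempt */
	return NF_DROP;			/* 0: after trying ip_vs_leave() */
}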
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp, int *ignored)
+ struct ip_vs_proto_data *pd, int *ignored)
{
+ struct ip_vs_protocol *pp = pd->pp;
struct ip_vs_conn *cp = NULL;
struct ip_vs_iphdr iph;
struct ip_vs_dest *dest;
@@ -371,12 +421,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
}
/*
- * Do not schedule replies from local real server. It is risky
- * for fwmark services but mostly for persistent services.
+ * Do not schedule replies from local real server.
*/
if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
- (svc->flags & IP_VS_SVC_F_PERSISTENT || svc->fwmark) &&
- (cp = pp->conn_in_get(svc->af, skb, pp, &iph, iph.len, 1))) {
+ (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
"Not scheduling reply for existing connection");
__ip_vs_conn_put(cp);
@@ -386,10 +434,10 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
/*
* Persistent service
*/
- if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
- *ignored = 0;
- return ip_vs_sched_persist(svc, skb, pptr);
- }
+ if (svc->flags & IP_VS_SVC_F_PERSISTENT)
+ return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);
+
+ *ignored = 0;
/*
* Non-persistent service
@@ -402,8 +450,6 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
return NULL;
}
- *ignored = 0;
-
dest = svc->scheduler->schedule(svc, skb);
if (dest == NULL) {
IP_VS_DBG(1, "Schedule: no dest found.\n");
@@ -419,13 +465,17 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
*/
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr,
- pptr[0], &iph.daddr, pptr[1], &p);
+
+ ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
+ &iph.saddr, pptr[0], &iph.daddr, pptr[1],
+ &p);
cp = ip_vs_conn_new(&p, &dest->addr,
dest->port ? dest->port : pptr[1],
- flags, dest);
- if (!cp)
+ flags, dest, skb->mark);
+ if (!cp) {
+ *ignored = -1;
return NULL;
+ }
}
IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
@@ -447,11 +497,14 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
* no destination is available for a new connection.
*/
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
+ struct net *net;
+ struct netns_ipvs *ipvs;
__be16 _ports[2], *pptr;
struct ip_vs_iphdr iph;
int unicast;
+
ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
@@ -459,18 +512,20 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_service_put(svc);
return NF_DROP;
}
+ net = skb_net(skb);
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
else
#endif
- unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
+ unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);
/* If it is a fwmark-based service, the cache_bypass sysctl is set
and the destination is a non-local unicast address, then create
a cache_bypass connection entry */
- if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
+ ipvs = net_ipvs(net);
+ if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
int ret, cs;
struct ip_vs_conn *cp;
unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
@@ -484,12 +539,12 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(svc->af, iph.protocol,
+ ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
&iph.saddr, pptr[0],
&iph.daddr, pptr[1], &p);
cp = ip_vs_conn_new(&p, &daddr, 0,
IP_VS_CONN_F_BYPASS | flags,
- NULL);
+ NULL, skb->mark);
if (!cp)
return NF_DROP;
}
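/*
 * Illustrative sketch, not part of the patch: the global-to-netns
 * sysctl conversion applied here and throughout the series. A global
 * such as sysctl_ip_vs_cache_bypass becomes a netns_ipvs field that is
 * reached through the namespace owning the skb.
 */
static int example_cache_bypass_enabled(struct sk_buff *skb)
{
	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));

	return ipvs->sysctl_cache_bypass;
}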
@@ -498,10 +553,10 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_in_stats(cp, skb);
/* set state */
- cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+ cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
/* transmit the first SYN packet */
- ret = cp->packet_xmit(skb, cp, pp);
+ ret = cp->packet_xmit(skb, cp, pd->pp);
/* do not touch skb anymore */
atomic_inc(&cp->in_pkts);
@@ -682,6 +737,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
struct ip_vs_protocol *pp,
unsigned int offset, unsigned int ihl)
{
+ struct netns_ipvs *ipvs;
unsigned int verdict = NF_DROP;
if (IP_VS_FWD_METHOD(cp) != 0) {
@@ -703,6 +759,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
if (!skb_make_writable(skb, offset))
goto out;
+ ipvs = net_ipvs(skb_net(skb));
+
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
ip_vs_nat_icmp_v6(skb, pp, cp, 1);
@@ -712,11 +770,11 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
- if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+ if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
goto out;
} else
#endif
- if ((sysctl_ip_vs_snat_reroute ||
+ if ((ipvs->sysctl_snat_reroute ||
skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
ip_route_me_harder(skb, RTN_LOCAL) != 0)
goto out;
@@ -808,7 +866,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
ip_vs_fill_iphdr(AF_INET, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
if (!cp)
return NF_ACCEPT;
@@ -885,7 +943,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
if (!cp)
return NF_ACCEPT;
@@ -924,9 +982,12 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
* Used for NAT and local client.
*/
static unsigned int
-handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
struct ip_vs_conn *cp, int ihl)
{
+ struct ip_vs_protocol *pp = pd->pp;
+ struct netns_ipvs *ipvs;
+
IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
if (!skb_make_writable(skb, ihl))
@@ -961,13 +1022,15 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* if it came from this machine itself. So re-compute
* the routing information.
*/
+ ipvs = net_ipvs(skb_net(skb));
+
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
- if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+ if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
goto drop;
} else
#endif
- if ((sysctl_ip_vs_snat_reroute ||
+ if ((ipvs->sysctl_snat_reroute ||
skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
ip_route_me_harder(skb, RTN_LOCAL) != 0)
goto drop;
@@ -975,7 +1038,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
ip_vs_out_stats(cp, skb);
- ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+ ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
skb->ipvs_property = 1;
if (!(cp->flags & IP_VS_CONN_F_NFCT))
ip_vs_notrack(skb);
@@ -999,9 +1062,12 @@ drop:
static unsigned int
ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
{
+ struct net *net = NULL;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
+ struct netns_ipvs *ipvs;
EnterFunction(11);
@@ -1022,6 +1088,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
if (unlikely(!skb_dst(skb)))
return NF_ACCEPT;
+ net = skb_net(skb);
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
@@ -1045,9 +1112,10 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
}
- pp = ip_vs_proto_get(iph.protocol);
- if (unlikely(!pp))
+ pd = ip_vs_proto_data_get(net, iph.protocol);
+ if (unlikely(!pd))
return NF_ACCEPT;
+ pp = pd->pp;
/* reassemble IP fragments */
#ifdef CONFIG_IP_VS_IPV6
@@ -1073,11 +1141,12 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
/*
* Check if the packet belongs to an existing entry
*/
- cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
+ cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
+ ipvs = net_ipvs(net);
if (likely(cp))
- return handle_response(af, skb, pp, cp, iph.len);
- if (sysctl_ip_vs_nat_icmp_send &&
+ return handle_response(af, skb, pd, cp, iph.len);
+ if (ipvs->sysctl_nat_icmp_send &&
(pp->protocol == IPPROTO_TCP ||
pp->protocol == IPPROTO_UDP ||
pp->protocol == IPPROTO_SCTP)) {
@@ -1087,7 +1156,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
sizeof(_ports), _ports);
if (pptr == NULL)
return NF_ACCEPT; /* Not for me */
- if (ip_vs_lookup_real_service(af, iph.protocol,
+ if (ip_vs_lookup_real_service(net, af, iph.protocol,
&iph.saddr,
pptr[0])) {
/*
@@ -1202,12 +1271,14 @@ ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
static int
ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
{
+ struct net *net = NULL;
struct iphdr *iph;
struct icmphdr _icmph, *ic;
struct iphdr _ciph, *cih; /* The ip header contained within the ICMP */
struct ip_vs_iphdr ciph;
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
unsigned int offset, ihl, verdict;
union nf_inet_addr snet;
@@ -1249,9 +1320,11 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
- pp = ip_vs_proto_get(cih->protocol);
- if (!pp)
+ net = skb_net(skb);
+ pd = ip_vs_proto_data_get(net, cih->protocol);
+ if (!pd)
return NF_ACCEPT;
+ pp = pd->pp;
/* Is the embedded protocol header present? */
if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
@@ -1265,10 +1338,10 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
ip_vs_fill_iphdr(AF_INET, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
if (!cp) {
/* The packet could also belong to a local client */
- cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
if (cp) {
snet.ip = iph->saddr;
return handle_response_icmp(AF_INET, skb, &snet,
@@ -1312,6 +1385,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
static int
ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
{
+ struct net *net = NULL;
struct ipv6hdr *iph;
struct icmp6hdr _icmph, *ic;
struct ipv6hdr _ciph, *cih; /* The ip header contained
@@ -1319,6 +1393,7 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
struct ip_vs_iphdr ciph;
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
unsigned int offset, verdict;
union nf_inet_addr snet;
struct rt6_info *rt;
@@ -1361,9 +1436,11 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
- pp = ip_vs_proto_get(cih->nexthdr);
- if (!pp)
+ net = skb_net(skb);
+ pd = ip_vs_proto_data_get(net, cih->nexthdr);
+ if (!pd)
return NF_ACCEPT;
+ pp = pd->pp;
/* Is the embedded protocol header present? */
/* TODO: we don't support fragmentation at the moment anyways */
@@ -1377,10 +1454,10 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
/* The embedded headers contain source and dest in reverse order */
- cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
if (!cp) {
/* The packet could also belong to a local client */
- cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+ cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
if (cp) {
ipv6_addr_copy(&snet.in6, &iph->saddr);
return handle_response_icmp(AF_INET6, skb, &snet,
@@ -1423,10 +1500,13 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
static unsigned int
ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
{
+ struct net *net;
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
int ret, restart, pkts;
+ struct netns_ipvs *ipvs;
/* Already marked as IPVS request or reply? */
if (skb->ipvs_property)
@@ -1480,20 +1560,21 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
}
+ net = skb_net(skb);
/* Protocol supported? */
- pp = ip_vs_proto_get(iph.protocol);
- if (unlikely(!pp))
+ pd = ip_vs_proto_data_get(net, iph.protocol);
+ if (unlikely(!pd))
return NF_ACCEPT;
-
+ pp = pd->pp;
/*
* Check if the packet belongs to an existing connection entry
*/
- cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
+ cp = pp->conn_in_get(af, skb, &iph, iph.len, 0);
if (unlikely(!cp)) {
int v;
- if (!pp->conn_schedule(af, skb, pp, &v, &cp))
+ if (!pp->conn_schedule(af, skb, pd, &v, &cp))
return v;
}
@@ -1505,12 +1586,13 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
}
IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-
+ net = skb_net(skb);
+ ipvs = net_ipvs(net);
/* Check the server status */
if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
/* the destination server is not available */
- if (sysctl_ip_vs_expire_nodest_conn) {
+ if (ipvs->sysctl_expire_nodest_conn) {
/* try to expire the connection immediately */
ip_vs_conn_expire_now(cp);
}
@@ -1521,7 +1603,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
}
ip_vs_in_stats(cp, skb);
- restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+ restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
if (cp->packet_xmit)
ret = cp->packet_xmit(skb, cp, pp);
/* do not touch skb anymore */
@@ -1535,35 +1617,41 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
*
* Sync connection if it is about to close, to
* encourage the standby servers to update the connection's timeout
+ *
+ * For ONE_PKT let ip_vs_sync_conn() do the filter work.
*/
- pkts = atomic_add_return(1, &cp->in_pkts);
- if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ pkts = ipvs->sysctl_sync_threshold[0];
+ else
+ pkts = atomic_add_return(1, &cp->in_pkts);
+
+ if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
cp->protocol == IPPROTO_SCTP) {
if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
- (pkts % sysctl_ip_vs_sync_threshold[1]
- == sysctl_ip_vs_sync_threshold[0])) ||
+ (pkts % ipvs->sysctl_sync_threshold[1]
+ == ipvs->sysctl_sync_threshold[0])) ||
(cp->old_state != cp->state &&
((cp->state == IP_VS_SCTP_S_CLOSED) ||
(cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
(cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
- ip_vs_sync_conn(cp);
+ ip_vs_sync_conn(net, cp);
goto out;
}
}
/* Keep this block last: TCP and others with pp->num_states <= 1 */
- else if (af == AF_INET &&
- (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+ else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
(((cp->protocol != IPPROTO_TCP ||
cp->state == IP_VS_TCP_S_ESTABLISHED) &&
- (pkts % sysctl_ip_vs_sync_threshold[1]
- == sysctl_ip_vs_sync_threshold[0])) ||
+ (pkts % ipvs->sysctl_sync_threshold[1]
+ == ipvs->sysctl_sync_threshold[0])) ||
((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
(cp->state == IP_VS_TCP_S_CLOSE) ||
(cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
(cp->state == IP_VS_TCP_S_TIME_WAIT)))))
- ip_vs_sync_conn(cp);
+ ip_vs_sync_conn(net, cp);
out:
cp->old_state = cp->state;
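/*
 * Illustrative sketch, not part of the patch: the master-sync rate
 * limit used above. With sync_threshold = { T0, T1 } (default 3, 50),
 * a connection is synced on packets T0, T0 + T1, T0 + 2*T1, and so on.
 * For one-packet (ONE_PKT) connections, pkts is pinned to T0 so the
 * test fires for every packet.
 */
static int example_should_sync(int pkts, const int sync_threshold[2])
{
	return pkts % sync_threshold[1] == sync_threshold[0];
}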
@@ -1782,7 +1870,39 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
},
#endif
};
+/*
+ * Initialize IP Virtual Server netns mem.
+ */
+static int __net_init __ip_vs_init(struct net *net)
+{
+ struct netns_ipvs *ipvs;
+
+ ipvs = net_generic(net, ip_vs_net_id);
+ if (ipvs == NULL) {
+ pr_err("%s(): no memory.\n", __func__);
+ return -ENOMEM;
+ }
+ ipvs->net = net;
+ /* Counters used for creating unique names */
+ ipvs->gen = atomic_read(&ipvs_netns_cnt);
+ atomic_inc(&ipvs_netns_cnt);
+ net->ipvs = ipvs;
+ printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
+ sizeof(struct netns_ipvs), ipvs->gen);
+ return 0;
+}
+static void __net_exit __ip_vs_cleanup(struct net *net)
+{
+ IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen);
+}
+
+static struct pernet_operations ipvs_core_ops = {
+ .init = __ip_vs_init,
+ .exit = __ip_vs_cleanup,
+ .id = &ip_vs_net_id,
+ .size = sizeof(struct netns_ipvs),
+};
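/*
 * Illustrative sketch, not part of the patch: once ipvs_core_ops is
 * registered with register_pernet_subsys(), each namespace's state can
 * be reached through the generic net pointer array; __ip_vs_init()
 * additionally caches it in net->ipvs for the net_ipvs() shortcut.
 */
static struct netns_ipvs *example_get_state(struct net *net)
{
	return net_generic(net, ip_vs_net_id);	/* equivalent to net_ipvs(net) */
}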
/*
* Initialize IP Virtual Server
@@ -1791,8 +1911,11 @@ static int __init ip_vs_init(void)
{
int ret;
- ip_vs_estimator_init();
+ ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
+ if (ret < 0)
+ return ret;
+ ip_vs_estimator_init();
ret = ip_vs_control_init();
if (ret < 0) {
pr_err("can't setup control.\n");
@@ -1813,15 +1936,23 @@ static int __init ip_vs_init(void)
goto cleanup_app;
}
+ ret = ip_vs_sync_init();
+ if (ret < 0) {
+ pr_err("can't setup sync data.\n");
+ goto cleanup_conn;
+ }
+
ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
if (ret < 0) {
pr_err("can't register hooks.\n");
- goto cleanup_conn;
+ goto cleanup_sync;
}
pr_info("ipvs loaded.\n");
return ret;
+cleanup_sync:
+ ip_vs_sync_cleanup();
cleanup_conn:
ip_vs_conn_cleanup();
cleanup_app:
@@ -1831,17 +1962,20 @@ static int __init ip_vs_init(void)
ip_vs_control_cleanup();
cleanup_estimator:
ip_vs_estimator_cleanup();
+ unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
return ret;
}
static void __exit ip_vs_cleanup(void)
{
nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ ip_vs_sync_cleanup();
ip_vs_conn_cleanup();
ip_vs_app_cleanup();
ip_vs_protocol_cleanup();
ip_vs_control_cleanup();
ip_vs_estimator_cleanup();
+ unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
pr_info("ipvs unloaded.\n");
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5..c73b0c8 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -38,6 +38,7 @@
#include <linux/mutex.h>
#include <net/net_namespace.h>
+#include <linux/nsproxy.h>
#include <net/ip.h>
#ifdef CONFIG_IP_VS_IPV6
#include <net/ipv6.h>
@@ -57,42 +58,7 @@ static DEFINE_MUTEX(__ip_vs_mutex);
/* lock for service table */
static DEFINE_RWLOCK(__ip_vs_svc_lock);
-/* lock for table with the real services */
-static DEFINE_RWLOCK(__ip_vs_rs_lock);
-
-/* lock for state and timeout tables */
-static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
-
-/* lock for drop entry handling */
-static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
-
-/* lock for drop packet handling */
-static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
-
-/* 1/rate drop and drop-entry variables */
-int ip_vs_drop_rate = 0;
-int ip_vs_drop_counter = 0;
-static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
-
-/* number of virtual services */
-static int ip_vs_num_services = 0;
-
/* sysctl variables */
-static int sysctl_ip_vs_drop_entry = 0;
-static int sysctl_ip_vs_drop_packet = 0;
-static int sysctl_ip_vs_secure_tcp = 0;
-static int sysctl_ip_vs_amemthresh = 1024;
-static int sysctl_ip_vs_am_droprate = 10;
-int sysctl_ip_vs_cache_bypass = 0;
-int sysctl_ip_vs_expire_nodest_conn = 0;
-int sysctl_ip_vs_expire_quiescent_template = 0;
-int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
-int sysctl_ip_vs_nat_icmp_send = 0;
-#ifdef CONFIG_IP_VS_NFCT
-int sysctl_ip_vs_conntrack;
-#endif
-int sysctl_ip_vs_snat_reroute = 1;
-
#ifdef CONFIG_IP_VS_DEBUG
static int sysctl_ip_vs_debug_level = 0;
@@ -105,7 +71,8 @@ int ip_vs_get_debug_level(void)
#ifdef CONFIG_IP_VS_IPV6
/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
+static int __ip_vs_addr_is_local_v6(struct net *net,
+ const struct in6_addr *addr)
{
struct rt6_info *rt;
struct flowi fl = {
@@ -114,7 +81,7 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
.fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
};
- rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+ rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl);
if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
return 1;
@@ -125,7 +92,7 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
* update_defense_level is called from keventd and from sysctl,
* so it needs to protect itself from softirqs
*/
-static void update_defense_level(void)
+static void update_defense_level(struct netns_ipvs *ipvs)
{
struct sysinfo i;
static int old_secure_tcp = 0;
@@ -141,73 +108,73 @@ static void update_defense_level(void)
/* si_swapinfo(&i); */
/* availmem = availmem - (i.totalswap - i.freeswap); */
- nomem = (availmem < sysctl_ip_vs_amemthresh);
+ nomem = (availmem < ipvs->sysctl_amemthresh);
local_bh_disable();
/* drop_entry */
- spin_lock(&__ip_vs_dropentry_lock);
- switch (sysctl_ip_vs_drop_entry) {
+ spin_lock(&ipvs->dropentry_lock);
+ switch (ipvs->sysctl_drop_entry) {
case 0:
- atomic_set(&ip_vs_dropentry, 0);
+ atomic_set(&ipvs->dropentry, 0);
break;
case 1:
if (nomem) {
- atomic_set(&ip_vs_dropentry, 1);
- sysctl_ip_vs_drop_entry = 2;
+ atomic_set(&ipvs->dropentry, 1);
+ ipvs->sysctl_drop_entry = 2;
} else {
- atomic_set(&ip_vs_dropentry, 0);
+ atomic_set(&ipvs->dropentry, 0);
}
break;
case 2:
if (nomem) {
- atomic_set(&ip_vs_dropentry, 1);
+ atomic_set(&ipvs->dropentry, 1);
} else {
- atomic_set(&ip_vs_dropentry, 0);
- sysctl_ip_vs_drop_entry = 1;
+ atomic_set(&ipvs->dropentry, 0);
+ ipvs->sysctl_drop_entry = 1;
};
break;
case 3:
- atomic_set(&ip_vs_dropentry, 1);
+ atomic_set(&ipvs->dropentry, 1);
break;
}
- spin_unlock(&__ip_vs_dropentry_lock);
+ spin_unlock(&ipvs->dropentry_lock);
/* drop_packet */
- spin_lock(&__ip_vs_droppacket_lock);
- switch (sysctl_ip_vs_drop_packet) {
+ spin_lock(&ipvs->droppacket_lock);
+ switch (ipvs->sysctl_drop_packet) {
case 0:
- ip_vs_drop_rate = 0;
+ ipvs->drop_rate = 0;
break;
case 1:
if (nomem) {
- ip_vs_drop_rate = ip_vs_drop_counter
- = sysctl_ip_vs_amemthresh /
- (sysctl_ip_vs_amemthresh-availmem);
- sysctl_ip_vs_drop_packet = 2;
+ ipvs->drop_rate = ipvs->drop_counter
+ = ipvs->sysctl_amemthresh /
+ (ipvs->sysctl_amemthresh-availmem);
+ ipvs->sysctl_drop_packet = 2;
} else {
- ip_vs_drop_rate = 0;
+ ipvs->drop_rate = 0;
}
break;
case 2:
if (nomem) {
- ip_vs_drop_rate = ip_vs_drop_counter
- = sysctl_ip_vs_amemthresh /
- (sysctl_ip_vs_amemthresh-availmem);
+ ipvs->drop_rate = ipvs->drop_counter
+ = ipvs->sysctl_amemthresh /
+ (ipvs->sysctl_amemthresh-availmem);
} else {
- ip_vs_drop_rate = 0;
- sysctl_ip_vs_drop_packet = 1;
+ ipvs->drop_rate = 0;
+ ipvs->sysctl_drop_packet = 1;
}
break;
case 3:
- ip_vs_drop_rate = sysctl_ip_vs_am_droprate;
+ ipvs->drop_rate = ipvs->sysctl_am_droprate;
break;
}
- spin_unlock(&__ip_vs_droppacket_lock);
+ spin_unlock(&ipvs->droppacket_lock);
/* secure_tcp */
- spin_lock(&ip_vs_securetcp_lock);
- switch (sysctl_ip_vs_secure_tcp) {
+ spin_lock(&ipvs->securetcp_lock);
+ switch (ipvs->sysctl_secure_tcp) {
case 0:
if (old_secure_tcp >= 2)
to_change = 0;
@@ -216,7 +183,7 @@ static void update_defense_level(void)
if (nomem) {
if (old_secure_tcp < 2)
to_change = 1;
- sysctl_ip_vs_secure_tcp = 2;
+ ipvs->sysctl_secure_tcp = 2;
} else {
if (old_secure_tcp >= 2)
to_change = 0;
@@ -229,7 +196,7 @@ static void update_defense_level(void)
} else {
if (old_secure_tcp >= 2)
to_change = 0;
- sysctl_ip_vs_secure_tcp = 1;
+ ipvs->sysctl_secure_tcp = 1;
}
break;
case 3:
@@ -237,10 +204,11 @@ static void update_defense_level(void)
to_change = 1;
break;
}
- old_secure_tcp = sysctl_ip_vs_secure_tcp;
+ old_secure_tcp = ipvs->sysctl_secure_tcp;
if (to_change >= 0)
- ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
- spin_unlock(&ip_vs_securetcp_lock);
+ ip_vs_protocol_timeout_change(ipvs,
+ ipvs->sysctl_secure_tcp > 1);
+ spin_unlock(&ipvs->securetcp_lock);
local_bh_enable();
}
@@ -250,16 +218,16 @@ static void update_defense_level(void)
* Timer for checking the defense
*/
#define DEFENSE_TIMER_PERIOD 1*HZ
-static void defense_work_handler(struct work_struct *work);
-static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
static void defense_work_handler(struct work_struct *work)
{
- update_defense_level();
- if (atomic_read(&ip_vs_dropentry))
- ip_vs_random_dropentry();
+ struct netns_ipvs *ipvs =
+ container_of(work, struct netns_ipvs, defense_work.work);
- schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
+ update_defense_level(ipvs);
+ if (atomic_read(&ipvs->dropentry))
+ ip_vs_random_dropentry(ipvs->net);
+ schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
}
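/*
 * Illustrative sketch, not part of the patch: the per-netns setup the
 * handler above implies. Each namespace owns its own defense_work, so
 * container_of() recovers the right netns_ipvs; example_start_defense()
 * is a hypothetical helper showing the init side.
 */
static void example_start_defense(struct netns_ipvs *ipvs)
{
	INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
	schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
}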
int
@@ -287,33 +255,13 @@ static struct list_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
/* the service table hashed by fwmark */
static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
-/*
- * Hash table: for real service lookups
- */
-#define IP_VS_RTAB_BITS 4
-#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
-#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
-
-static struct list_head ip_vs_rtable[IP_VS_RTAB_SIZE];
-
-/*
- * Trash for destinations
- */
-static LIST_HEAD(ip_vs_dest_trash);
-
-/*
- * FTP & NULL virtual service counters
- */
-static atomic_t ip_vs_ftpsvc_counter = ATOMIC_INIT(0);
-static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
-
/*
* Returns hash value for virtual service
*/
-static __inline__ unsigned
-ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
- __be16 port)
+static inline unsigned
+ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
+ const union nf_inet_addr *addr, __be16 port)
{
register unsigned porth = ntohs(port);
__be32 addr_fold = addr->ip;
@@ -323,6 +271,7 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
+ addr_fold ^= ((size_t)net>>8);
return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
& IP_VS_SVC_TAB_MASK;
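/*
 * Illustrative sketch, not part of the patch: folding the netns
 * pointer into a hash key, as the two hash functions here now do.
 * Shifting right by 8 drops the low pointer bits, which are the same
 * for every struct net due to allocator alignment.
 */
static unsigned example_net_fold(struct net *net, unsigned key, unsigned mask)
{
	return (((size_t)net >> 8) ^ key) & mask;
}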
@@ -331,13 +280,13 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
/*
* Returns hash value of fwmark for virtual service lookup
*/
-static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark)
+static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
{
- return fwmark & IP_VS_SVC_TAB_MASK;
+ return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
}
/*
- * Hashes a service in the ip_vs_svc_table by <proto,addr,port>
+ * Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port>
* or in the ip_vs_svc_fwm_table by fwmark.
* Should be called with locked tables.
*/
@@ -353,16 +302,16 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
if (svc->fwmark == 0) {
/*
- * Hash it by <protocol,addr,port> in ip_vs_svc_table
+ * Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
*/
- hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr,
- svc->port);
+ hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
+ &svc->addr, svc->port);
list_add(&svc->s_list, &ip_vs_svc_table[hash]);
} else {
/*
- * Hash it by fwmark in ip_vs_svc_fwm_table
+ * Hash it by fwmark in svc_fwm_table
*/
- hash = ip_vs_svc_fwm_hashkey(svc->fwmark);
+ hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
}
@@ -374,7 +323,7 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
/*
- * Unhashes a service from ip_vs_svc_table/ip_vs_svc_fwm_table.
+ * Unhashes a service from svc_table / svc_fwm_table.
* Should be called with locked tables.
*/
static int ip_vs_svc_unhash(struct ip_vs_service *svc)
@@ -386,10 +335,10 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
}
if (svc->fwmark == 0) {
- /* Remove it from the ip_vs_svc_table table */
+ /* Remove it from the svc_table */
list_del(&svc->s_list);
} else {
- /* Remove it from the ip_vs_svc_fwm_table table */
+ /* Remove it from the svc_fwm_table */
list_del(&svc->f_list);
}
@@ -400,23 +349,24 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
/*
- * Get service by {proto,addr,port} in the service table.
+ * Get service by {netns, proto, addr, port} in the service table.
*/
static inline struct ip_vs_service *
-__ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
- __be16 vport)
+__ip_vs_service_find(struct net *net, int af, __u16 protocol,
+ const union nf_inet_addr *vaddr, __be16 vport)
{
unsigned hash;
struct ip_vs_service *svc;
/* Check for "full" addressed entries */
- hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
+ hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
if ((svc->af == af)
&& ip_vs_addr_equal(af, &svc->addr, vaddr)
&& (svc->port == vport)
- && (svc->protocol == protocol)) {
+ && (svc->protocol == protocol)
+ && net_eq(svc->net, net)) {
/* HIT */
return svc;
}
@@ -430,16 +380,17 @@ __ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
* Get service by {fwmark} in the service table.
*/
static inline struct ip_vs_service *
-__ip_vs_svc_fwm_find(int af, __u32 fwmark)
+__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
{
unsigned hash;
struct ip_vs_service *svc;
/* Check for fwmark addressed entries */
- hash = ip_vs_svc_fwm_hashkey(fwmark);
+ hash = ip_vs_svc_fwm_hashkey(net, fwmark);
list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
- if (svc->fwmark == fwmark && svc->af == af) {
+ if (svc->fwmark == fwmark && svc->af == af
+ && net_eq(svc->net, net)) {
/* HIT */
return svc;
}
@@ -449,42 +400,44 @@ __ip_vs_svc_fwm_find(int af, __u32 fwmark)
}
struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
const union nf_inet_addr *vaddr, __be16 vport)
{
struct ip_vs_service *svc;
+ struct netns_ipvs *ipvs = net_ipvs(net);
read_lock(&__ip_vs_svc_lock);
/*
* Check the table hashed by fwmark first
*/
- if (fwmark && (svc = __ip_vs_svc_fwm_find(af, fwmark)))
+ svc = __ip_vs_svc_fwm_find(net, af, fwmark);
+ if (fwmark && svc)
goto out;
/*
* Check the table hashed by <protocol,addr,port>
* for "full" addressed entries
*/
- svc = __ip_vs_service_find(af, protocol, vaddr, vport);
+ svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
if (svc == NULL
&& protocol == IPPROTO_TCP
- && atomic_read(&ip_vs_ftpsvc_counter)
+ && atomic_read(&ipvs->ftpsvc_counter)
&& (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
/*
* Check if ftp service entry exists, the packet
* might belong to FTP data connections.
*/
- svc = __ip_vs_service_find(af, protocol, vaddr, FTPPORT);
+ svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
}
if (svc == NULL
- && atomic_read(&ip_vs_nullsvc_counter)) {
+ && atomic_read(&ipvs->nullsvc_counter)) {
/*
* Check if the catch-all port (port zero) exists
*/
- svc = __ip_vs_service_find(af, protocol, vaddr, 0);
+ svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
}
out:
@@ -519,6 +472,7 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port), atomic_read(&svc->usecnt));
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
}
@@ -545,10 +499,10 @@ static inline unsigned ip_vs_rs_hashkey(int af,
}
/*
- * Hashes ip_vs_dest in ip_vs_rtable by <proto,addr,port>.
+ * Hashes ip_vs_dest in rs_table by <proto,addr,port>.
* should be called with locked tables.
*/
-static int ip_vs_rs_hash(struct ip_vs_dest *dest)
+static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
{
unsigned hash;
@@ -562,19 +516,19 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest)
*/
hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
- list_add(&dest->d_list, &ip_vs_rtable[hash]);
+ list_add(&dest->d_list, &ipvs->rs_table[hash]);
return 1;
}
/*
- * UNhashes ip_vs_dest from ip_vs_rtable.
+ * UNhashes ip_vs_dest from rs_table.
* should be called with locked tables.
*/
static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
{
/*
- * Remove it from the ip_vs_rtable table.
+ * Remove it from the rs_table.
*/
if (!list_empty(&dest->d_list)) {
list_del(&dest->d_list);
@@ -588,10 +542,11 @@ static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
* Lookup real service by <proto,addr,port> in the real service table.
*/
struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
const union nf_inet_addr *daddr,
__be16 dport)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
unsigned hash;
struct ip_vs_dest *dest;
@@ -601,19 +556,19 @@ ip_vs_lookup_real_service(int af, __u16 protocol,
*/
hash = ip_vs_rs_hashkey(af, daddr, dport);
- read_lock(&__ip_vs_rs_lock);
- list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
+ read_lock(&ipvs->rs_lock);
+ list_for_each_entry(dest, &ipvs->rs_table[hash], d_list) {
if ((dest->af == af)
&& ip_vs_addr_equal(af, &dest->addr, daddr)
&& (dest->port == dport)
&& ((dest->protocol == protocol) ||
dest->vfwmark)) {
/* HIT */
- read_unlock(&__ip_vs_rs_lock);
+ read_unlock(&ipvs->rs_lock);
return dest;
}
}
- read_unlock(&__ip_vs_rs_lock);
+ read_unlock(&ipvs->rs_lock);
return NULL;
}
@@ -652,15 +607,16 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
* ip_vs_lookup_real_service() looked promising, but
* does not seem to work as expected.
*/
-struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr,
+struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
+ const union nf_inet_addr *daddr,
__be16 dport,
const union nf_inet_addr *vaddr,
- __be16 vport, __u16 protocol)
+ __be16 vport, __u16 protocol, __u32 fwmark)
{
struct ip_vs_dest *dest;
struct ip_vs_service *svc;
- svc = ip_vs_service_get(af, 0, protocol, vaddr, vport);
+ svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
if (!svc)
return NULL;
dest = ip_vs_lookup_dest(svc, daddr, dport);
@@ -685,11 +641,12 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
__be16 dport)
{
struct ip_vs_dest *dest, *nxt;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
/*
* Find the destination in trash
*/
- list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+ list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
"dest->refcnt=%d\n",
dest->vfwmark,
@@ -720,6 +677,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
list_del(&dest->n_list);
ip_vs_dst_reset(dest);
__ip_vs_unbind_svc(dest);
+ free_percpu(dest->stats.cpustats);
kfree(dest);
}
}
@@ -737,14 +695,16 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
* are expired, and the refcnt of each destination in the trash must
* be 1, so we simply release them here.
*/
-static void ip_vs_trash_cleanup(void)
+static void ip_vs_trash_cleanup(struct net *net)
{
struct ip_vs_dest *dest, *nxt;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+ list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
list_del(&dest->n_list);
ip_vs_dst_reset(dest);
__ip_vs_unbind_svc(dest);
+ free_percpu(dest->stats.cpustats);
kfree(dest);
}
}
@@ -768,6 +728,7 @@ static void
__ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
struct ip_vs_dest_user_kern *udest, int add)
{
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
int conn_flags;
/* set the weight and the flags */
@@ -780,12 +741,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
conn_flags |= IP_VS_CONN_F_NOOUTPUT;
} else {
/*
- * Put the real service in ip_vs_rtable if not present.
+ * Put the real service in rs_table if not present.
* For now only for NAT!
*/
- write_lock_bh(&__ip_vs_rs_lock);
- ip_vs_rs_hash(dest);
- write_unlock_bh(&__ip_vs_rs_lock);
+ write_lock_bh(&ipvs->rs_lock);
+ ip_vs_rs_hash(ipvs, dest);
+ write_unlock_bh(&ipvs->rs_lock);
}
atomic_set(&dest->conn_flags, conn_flags);
@@ -813,7 +774,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
spin_unlock(&dest->dst_lock);
if (add)
- ip_vs_new_estimator(&dest->stats);
+ ip_vs_new_estimator(svc->net, &dest->stats);
write_lock_bh(&__ip_vs_svc_lock);
@@ -850,12 +811,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&
- !__ip_vs_addr_is_local_v6(&udest->addr.in6))
+ !__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
return -EINVAL;
} else
#endif
{
- atype = inet_addr_type(&init_net, udest->addr.ip);
+ atype = inet_addr_type(svc->net, udest->addr.ip);
if (atype != RTN_LOCAL && atype != RTN_UNICAST)
return -EINVAL;
}
@@ -865,6 +826,11 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
pr_err("%s(): no memory.\n", __func__);
return -ENOMEM;
}
+ dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!dest->stats.cpustats) {
+ pr_err("%s() alloc_percpu failed\n", __func__);
+ goto err_alloc;
+ }
dest->af = svc->af;
dest->protocol = svc->protocol;
@@ -888,6 +854,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
LeaveFunction(2);
return 0;
+
+err_alloc:
+ kfree(dest);
+ return -ENOMEM;
}
@@ -1006,16 +976,18 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
/*
* Delete a destination (must be already unlinked from the service)
*/
-static void __ip_vs_del_dest(struct ip_vs_dest *dest)
+static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
{
- ip_vs_kill_estimator(&dest->stats);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_kill_estimator(net, &dest->stats);
/*
* Remove it from the d-linked list with the real services.
*/
- write_lock_bh(&__ip_vs_rs_lock);
+ write_lock_bh(&ipvs->rs_lock);
ip_vs_rs_unhash(dest);
- write_unlock_bh(&__ip_vs_rs_lock);
+ write_unlock_bh(&ipvs->rs_lock);
/*
* Decrease the refcnt of the dest, and free the dest
@@ -1034,6 +1006,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
and only one user context can update virtual service at a
time, so the operation here is OK */
atomic_dec(&dest->svc->refcnt);
+ free_percpu(dest->stats.cpustats);
kfree(dest);
} else {
IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
@@ -1041,7 +1014,7 @@ static void __ip_vs_del_dest(struct ip_vs_dest *dest)
IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
- list_add(&dest->n_list, &ip_vs_dest_trash);
+ list_add(&dest->n_list, &ipvs->dest_trash);
atomic_inc(&dest->refcnt);
}
}
@@ -1105,7 +1078,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
/*
* Delete the destination
*/
- __ip_vs_del_dest(dest);
+ __ip_vs_del_dest(svc->net, dest);
LeaveFunction(2);
@@ -1117,13 +1090,14 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
* Add a service into the service hash table
*/
static int
-ip_vs_add_service(struct ip_vs_service_user_kern *u,
+ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
struct ip_vs_service **svc_p)
{
int ret = 0;
struct ip_vs_scheduler *sched = NULL;
struct ip_vs_pe *pe = NULL;
struct ip_vs_service *svc = NULL;
+ struct netns_ipvs *ipvs = net_ipvs(net);
/* increase the module use count */
ip_vs_use_count_inc();
@@ -1137,7 +1111,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
}
if (u->pe_name && *u->pe_name) {
- pe = ip_vs_pe_get(u->pe_name);
+ pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
@@ -1159,6 +1133,11 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
ret = -ENOMEM;
goto out_err;
}
+ svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!svc->stats.cpustats) {
+ pr_err("%s() alloc_percpu failed\n", __func__);
+ goto out_err;
+ }
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 0);
@@ -1172,6 +1151,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
svc->flags = u->flags;
svc->timeout = u->timeout * HZ;
svc->netmask = u->netmask;
+ svc->net = net;
INIT_LIST_HEAD(&svc->destinations);
rwlock_init(&svc->sched_lock);
@@ -1189,15 +1169,15 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
/* Update the virtual service counters */
if (svc->port == FTPPORT)
- atomic_inc(&ip_vs_ftpsvc_counter);
+ atomic_inc(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
- atomic_inc(&ip_vs_nullsvc_counter);
+ atomic_inc(&ipvs->nullsvc_counter);
- ip_vs_new_estimator(&svc->stats);
+ ip_vs_new_estimator(net, &svc->stats);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
- ip_vs_num_services++;
+ ipvs->num_services++;
/* Hash the service into the service table */
write_lock_bh(&__ip_vs_svc_lock);
@@ -1207,6 +1187,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
*svc_p = svc;
return 0;
+
out_err:
if (svc != NULL) {
ip_vs_unbind_scheduler(svc);
@@ -1215,6 +1196,8 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
ip_vs_app_inc_put(svc->inc);
local_bh_enable();
}
+ if (svc->stats.cpustats)
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
ip_vs_scheduler_put(sched);
@@ -1248,7 +1231,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
old_sched = sched;
if (u->pe_name && *u->pe_name) {
- pe = ip_vs_pe_get(u->pe_name);
+ pe = ip_vs_pe_getbyname(u->pe_name);
if (pe == NULL) {
pr_info("persistence engine module ip_vs_pe_%s "
"not found\n", u->pe_name);
@@ -1334,14 +1317,15 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
struct ip_vs_dest *dest, *nxt;
struct ip_vs_scheduler *old_sched;
struct ip_vs_pe *old_pe;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
pr_info("%s: enter\n", __func__);
/* Count only IPv4 services for old get/setsockopt interface */
if (svc->af == AF_INET)
- ip_vs_num_services--;
+ ipvs->num_services--;
- ip_vs_kill_estimator(&svc->stats);
+ ip_vs_kill_estimator(svc->net, &svc->stats);
/* Unbind scheduler */
old_sched = svc->scheduler;
@@ -1364,16 +1348,16 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
*/
list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
__ip_vs_unlink_dest(svc, dest, 0);
- __ip_vs_del_dest(dest);
+ __ip_vs_del_dest(svc->net, dest);
}
/*
* Update the virtual service counters
*/
if (svc->port == FTPPORT)
- atomic_dec(&ip_vs_ftpsvc_counter);
+ atomic_dec(&ipvs->ftpsvc_counter);
else if (svc->port == 0)
- atomic_dec(&ip_vs_nullsvc_counter);
+ atomic_dec(&ipvs->nullsvc_counter);
/*
* Free the service if nobody refers to it
@@ -1383,6 +1367,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
svc->fwmark,
IP_VS_DBG_ADDR(svc->af, &svc->addr),
ntohs(svc->port), atomic_read(&svc->usecnt));
+ free_percpu(svc->stats.cpustats);
kfree(svc);
}
@@ -1428,17 +1413,19 @@ static int ip_vs_del_service(struct ip_vs_service *svc)
/*
* Flush all the virtual services
*/
-static int ip_vs_flush(void)
+static int ip_vs_flush(struct net *net)
{
int idx;
struct ip_vs_service *svc, *nxt;
/*
- * Flush the service table hashed by <protocol,addr,port>
+ * Flush the service table hashed by <netns,protocol,addr,port>
*/
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
- list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) {
- ip_vs_unlink_service(svc);
+ list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx],
+ s_list) {
+ if (net_eq(svc->net, net))
+ ip_vs_unlink_service(svc);
}
}
@@ -1448,7 +1435,8 @@ static int ip_vs_flush(void)
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry_safe(svc, nxt,
&ip_vs_svc_fwm_table[idx], f_list) {
- ip_vs_unlink_service(svc);
+ if (net_eq(svc->net, net))
+ ip_vs_unlink_service(svc);
}
}
@@ -1472,24 +1460,26 @@ static int ip_vs_zero_service(struct ip_vs_service *svc)
return 0;
}
-static int ip_vs_zero_all(void)
+static int ip_vs_zero_all(struct net *net)
{
int idx;
struct ip_vs_service *svc;
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
- ip_vs_zero_service(svc);
+ if (net_eq(svc->net, net))
+ ip_vs_zero_service(svc);
}
}
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
- ip_vs_zero_service(svc);
+ if (net_eq(svc->net, net))
+ ip_vs_zero_service(svc);
}
}
- ip_vs_zero_stats(&ip_vs_stats);
+ ip_vs_zero_stats(net_ipvs(net)->tot_stats);
return 0;
}
@@ -1498,6 +1488,7 @@ static int
proc_do_defense_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
+ struct net *net = current->nsproxy->net_ns;
int *valp = table->data;
int val = *valp;
int rc;
@@ -1508,7 +1499,7 @@ proc_do_defense_mode(ctl_table *table, int write,
/* Restore the correct value */
*valp = val;
} else {
- update_defense_level();
+ update_defense_level(net_ipvs(net));
}
}
return rc;
@@ -1534,45 +1525,54 @@ proc_do_sync_threshold(ctl_table *table, int write,
return rc;
}
+static int
+proc_do_sync_mode(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int *valp = table->data;
+ int val = *valp;
+ int rc;
+
+ rc = proc_dointvec(table, write, buffer, lenp, ppos);
+ if (write && (*valp != val)) {
+ if ((*valp < 0) || (*valp > 1)) {
+ /* Restore the correct value */
+ *valp = val;
+ } else {
+ struct net *net = current->nsproxy->net_ns;
+ ip_vs_sync_switch_mode(net, val);
+ }
+ }
+ return rc;
+}
/*
* IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
+ * Do not change the order or insert new entries without
+ * aligning with the netns init in __ip_vs_control_init()
*/
static struct ctl_table vs_vars[] = {
{
.procname = "amemthresh",
- .data = &sysctl_ip_vs_amemthresh,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
-#ifdef CONFIG_IP_VS_DEBUG
- {
- .procname = "debug_level",
- .data = &sysctl_ip_vs_debug_level,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif
{
.procname = "am_droprate",
- .data = &sysctl_ip_vs_am_droprate,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "drop_entry",
- .data = &sysctl_ip_vs_drop_entry,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "drop_packet",
- .data = &sysctl_ip_vs_drop_packet,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
@@ -1580,7 +1580,6 @@ static struct ctl_table vs_vars[] = {
#ifdef CONFIG_IP_VS_NFCT
{
.procname = "conntrack",
- .data = &sysctl_ip_vs_conntrack,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
@@ -1588,18 +1587,62 @@ static struct ctl_table vs_vars[] = {
#endif
{
.procname = "secure_tcp",
- .data = &sysctl_ip_vs_secure_tcp,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_do_defense_mode,
},
{
.procname = "snat_reroute",
- .data = &sysctl_ip_vs_snat_reroute,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .procname = "sync_version",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_do_sync_mode,
+ },
+ {
+ .procname = "cache_bypass",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "expire_nodest_conn",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "expire_quiescent_template",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "sync_threshold",
+ .maxlen =
+ sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
+ .mode = 0644,
+ .proc_handler = proc_do_sync_threshold,
+ },
+ {
+ .procname = "nat_icmp_send",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#ifdef CONFIG_IP_VS_DEBUG
+ {
+ .procname = "debug_level",
+ .data = &sysctl_ip_vs_debug_level,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
#if 0
{
.procname = "timeout_established",
@@ -1686,41 +1729,6 @@ static struct ctl_table vs_vars[] = {
.proc_handler = proc_dointvec_jiffies,
},
#endif
- {
- .procname = "cache_bypass",
- .data = &sysctl_ip_vs_cache_bypass,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "expire_nodest_conn",
- .data = &sysctl_ip_vs_expire_nodest_conn,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "expire_quiescent_template",
- .data = &sysctl_ip_vs_expire_quiescent_template,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "sync_threshold",
- .data = &sysctl_ip_vs_sync_threshold,
- .maxlen = sizeof(sysctl_ip_vs_sync_threshold),
- .mode = 0644,
- .proc_handler = proc_do_sync_threshold,
- },
- {
- .procname = "nat_icmp_send",
- .data = &sysctl_ip_vs_nat_icmp_send,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
{ }
};
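/*
 * Illustrative sketch, not part of the patch: why the order in vs_vars
 * above must not change. A plausible per-netns init (the real one
 * lives in __ip_vs_control_init(), outside this hunk) duplicates the
 * table and patches each .data pointer by index, so reordering entries
 * would silently bind handlers to the wrong netns_ipvs fields.
 */
static struct ctl_table *example_dup_vs_vars(struct netns_ipvs *ipvs)
{
	struct ctl_table *tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);

	if (tbl) {
		tbl[0].data = &ipvs->sysctl_amemthresh;		/* "amemthresh" */
		tbl[1].data = &ipvs->sysctl_am_droprate;	/* "am_droprate" */
		/* ... one assignment per entry, in table order ... */
	}
	return tbl;
}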
@@ -1732,11 +1740,10 @@ const struct ctl_path net_vs_ctl_path[] = {
};
EXPORT_SYMBOL_GPL(net_vs_ctl_path);
-static struct ctl_table_header * sysctl_header;
-
#ifdef CONFIG_PROC_FS
struct ip_vs_iter {
+ struct seq_net_private p; /* Do not move this, netns depends upon it */
struct list_head *table;
int bucket;
};
@@ -1763,6 +1770,7 @@ static inline const char *ip_vs_fwd_name(unsigned flags)
/* Get the Nth entry in the two lists */
static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
{
+ struct net *net = seq_file_net(seq);
struct ip_vs_iter *iter = seq->private;
int idx;
struct ip_vs_service *svc;
@@ -1770,7 +1778,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
/* look in hash by protocol */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
- if (pos-- == 0){
+ if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_table;
iter->bucket = idx;
return svc;
@@ -1781,7 +1789,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
/* keep looking in fwmark */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
- if (pos-- == 0) {
+ if (net_eq(svc->net, net) && pos-- == 0) {
iter->table = ip_vs_svc_fwm_table;
iter->bucket = idx;
return svc;
@@ -1935,7 +1943,7 @@ static const struct seq_operations ip_vs_info_seq_ops = {
static int ip_vs_info_open(struct inode *inode, struct file *file)
{
- return seq_open_private(file, &ip_vs_info_seq_ops,
+ return seq_open_net(inode, file, &ip_vs_info_seq_ops,
sizeof(struct ip_vs_iter));
}
@@ -1949,13 +1957,11 @@ static const struct file_operations ip_vs_info_fops = {
#endif
-struct ip_vs_stats ip_vs_stats = {
- .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
-};
-
#ifdef CONFIG_PROC_FS
static int ip_vs_stats_show(struct seq_file *seq, void *v)
{
+ struct net *net = seq_file_single_net(seq);
+ struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
@@ -1963,29 +1969,29 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
seq_printf(seq,
" Conns Packets Packets Bytes Bytes\n");
- spin_lock_bh(&ip_vs_stats.lock);
- seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
- ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
- (unsigned long long) ip_vs_stats.ustats.inbytes,
- (unsigned long long) ip_vs_stats.ustats.outbytes);
+ spin_lock_bh(&tot_stats->lock);
+ seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", tot_stats->ustats.conns,
+ tot_stats->ustats.inpkts, tot_stats->ustats.outpkts,
+ (unsigned long long) tot_stats->ustats.inbytes,
+ (unsigned long long) tot_stats->ustats.outbytes);
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
seq_printf(seq,"%8X %8X %8X %16X %16X\n",
- ip_vs_stats.ustats.cps,
- ip_vs_stats.ustats.inpps,
- ip_vs_stats.ustats.outpps,
- ip_vs_stats.ustats.inbps,
- ip_vs_stats.ustats.outbps);
- spin_unlock_bh(&ip_vs_stats.lock);
+ tot_stats->ustats.cps,
+ tot_stats->ustats.inpps,
+ tot_stats->ustats.outpps,
+ tot_stats->ustats.inbps,
+ tot_stats->ustats.outbps);
+ spin_unlock_bh(&tot_stats->lock);
return 0;
}
static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
{
- return single_open(file, ip_vs_stats_show, NULL);
+ return single_open_net(inode, file, ip_vs_stats_show);
}
static const struct file_operations ip_vs_stats_fops = {
@@ -1996,13 +2002,70 @@ static const struct file_operations ip_vs_stats_fops = {
.release = single_release,
};
+static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
+{
+ struct net *net = seq_file_single_net(seq);
+ struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
+ int i;
+
+/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+ seq_puts(seq,
+ " Total Incoming Outgoing Incoming Outgoing\n");
+ seq_printf(seq,
+ "CPU Conns Packets Packets Bytes Bytes\n");
+
+ for_each_possible_cpu(i) {
+ struct ip_vs_cpu_stats *u = per_cpu_ptr(net->ipvs->cpustats, i);
+ seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
+ i, u->ustats.conns, u->ustats.inpkts,
+ u->ustats.outpkts, (__u64)u->ustats.inbytes,
+ (__u64)u->ustats.outbytes);
+ }
+
+ spin_lock_bh(&tot_stats->lock);
+ seq_printf(seq, " ~ %8X %8X %8X %16LX %16LX\n\n",
+ tot_stats->ustats.conns, tot_stats->ustats.inpkts,
+ tot_stats->ustats.outpkts,
+ (unsigned long long) tot_stats->ustats.inbytes,
+ (unsigned long long) tot_stats->ustats.outbytes);
+
+/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+ seq_puts(seq,
+ " Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
+ seq_printf(seq, " %8X %8X %8X %16X %16X\n",
+ tot_stats->ustats.cps,
+ tot_stats->ustats.inpps,
+ tot_stats->ustats.outpps,
+ tot_stats->ustats.inbps,
+ tot_stats->ustats.outbps);
+ spin_unlock_bh(&tot_stats->lock);
+
+ return 0;
+}
+
+static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open_net(inode, file, ip_vs_stats_percpu_show);
+}
+
+static const struct file_operations ip_vs_stats_percpu_fops = {
+ .owner = THIS_MODULE,
+ .open = ip_vs_stats_percpu_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
/*
* Set timeout values for tcp tcpfin udp in the timeout_table.
*/
-static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
+static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
{
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+ struct ip_vs_proto_data *pd;
+#endif
+
IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
u->tcp_timeout,
u->tcp_fin_timeout,
@@ -2010,19 +2073,22 @@ static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout) {
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED]
+ pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+ pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
= u->tcp_timeout * HZ;
}
if (u->tcp_fin_timeout) {
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT]
+ pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+ pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
= u->tcp_fin_timeout * HZ;
}
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
if (u->udp_timeout) {
- ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL]
+ pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+ pd->timeout_table[IP_VS_UDP_S_NORMAL]
= u->udp_timeout * HZ;
}
#endif
@@ -2087,6 +2153,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
static int
do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
+ struct net *net = sock_net(sk);
int ret;
unsigned char arg[MAX_ARG_LEN];
struct ip_vs_service_user *usvc_compat;
@@ -2121,19 +2188,20 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (cmd == IP_VS_SO_SET_FLUSH) {
/* Flush the virtual service */
- ret = ip_vs_flush();
+ ret = ip_vs_flush(net);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
/* Set timeout values for (tcp tcpfin udp) */
- ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg);
+ ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid);
+ ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
+ dm->syncid);
goto out_unlock;
} else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
- ret = stop_sync_thread(dm->state);
+ ret = stop_sync_thread(net, dm->state);
goto out_unlock;
}
@@ -2148,7 +2216,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (cmd == IP_VS_SO_SET_ZERO) {
/* if no service address is set, zero counters in all */
if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
- ret = ip_vs_zero_all();
+ ret = ip_vs_zero_all(net);
goto out_unlock;
}
}
@@ -2165,10 +2233,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* Lookup the exact service by <protocol, addr, port> or fwmark */
if (usvc.fwmark == 0)
- svc = __ip_vs_service_find(usvc.af, usvc.protocol,
+ svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
&usvc.addr, usvc.port);
else
- svc = __ip_vs_svc_fwm_find(usvc.af, usvc.fwmark);
+ svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
@@ -2181,7 +2249,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (svc != NULL)
ret = -EEXIST;
else
- ret = ip_vs_add_service(&usvc, &svc);
+ ret = ip_vs_add_service(net, &usvc, &svc);
break;
case IP_VS_SO_SET_EDIT:
ret = ip_vs_edit_service(svc, &usvc);
@@ -2241,7 +2309,8 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
}
static inline int
-__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
+__ip_vs_get_service_entries(struct net *net,
+ const struct ip_vs_get_services *get,
struct ip_vs_get_services __user *uptr)
{
int idx, count=0;
@@ -2252,7 +2321,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
/* Only expose IPv4 entries to old interface */
- if (svc->af != AF_INET)
+ if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
@@ -2271,7 +2340,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
/* Only expose IPv4 entries to old interface */
- if (svc->af != AF_INET)
+ if (svc->af != AF_INET || !net_eq(svc->net, net))
continue;
if (count >= get->num_services)
@@ -2291,7 +2360,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
}
static inline int
-__ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
+__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
struct ip_vs_get_dests __user *uptr)
{
struct ip_vs_service *svc;
@@ -2299,9 +2368,9 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
int ret = 0;
if (get->fwmark)
- svc = __ip_vs_svc_fwm_find(AF_INET, get->fwmark);
+ svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
else
- svc = __ip_vs_service_find(AF_INET, get->protocol, &addr,
+ svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
get->port);
if (svc) {
@@ -2336,17 +2405,21 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
}
static inline void
-__ip_vs_get_timeouts(struct ip_vs_timeout_user *u)
+__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
{
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+ struct ip_vs_proto_data *pd;
+#endif
+
#ifdef CONFIG_IP_VS_PROTO_TCP
- u->tcp_timeout =
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
- u->tcp_fin_timeout =
- ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
+ pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+ u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
+ u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
#endif
#ifdef CONFIG_IP_VS_PROTO_UDP
+ pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
u->udp_timeout =
- ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
+ pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
#endif
}
@@ -2375,7 +2448,10 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
unsigned char arg[128];
int ret = 0;
unsigned int copylen;
+ struct net *net = sock_net(sk);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ BUG_ON(!net);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -2418,7 +2494,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
info.size = ip_vs_conn_tab_size;
- info.num_services = ip_vs_num_services;
+ info.num_services = ipvs->num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
}
@@ -2437,7 +2513,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ret = -EINVAL;
goto out;
}
- ret = __ip_vs_get_service_entries(get, user);
+ ret = __ip_vs_get_service_entries(net, get, user);
}
break;
@@ -2450,10 +2526,11 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
entry = (struct ip_vs_service_entry *)arg;
addr.ip = entry->addr;
if (entry->fwmark)
- svc = __ip_vs_svc_fwm_find(AF_INET, entry->fwmark);
+ svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
else
- svc = __ip_vs_service_find(AF_INET, entry->protocol,
- &addr, entry->port);
+ svc = __ip_vs_service_find(net, AF_INET,
+ entry->protocol, &addr,
+ entry->port);
if (svc) {
ip_vs_copy_service(entry, svc);
if (copy_to_user(user, entry, sizeof(*entry)) != 0)
@@ -2476,7 +2553,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ret = -EINVAL;
goto out;
}
- ret = __ip_vs_get_dest_entries(get, user);
+ ret = __ip_vs_get_dest_entries(net, get, user);
}
break;
@@ -2484,7 +2561,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct ip_vs_timeout_user t;
- __ip_vs_get_timeouts(&t);
+ __ip_vs_get_timeouts(net, &t);
if (copy_to_user(user, &t, sizeof(t)) != 0)
ret = -EFAULT;
}
@@ -2495,15 +2572,17 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
struct ip_vs_daemon_user d[2];
memset(&d, 0, sizeof(d));
- if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
+ if (ipvs->sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
- strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
- d[0].syncid = ip_vs_master_syncid;
+ strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+ sizeof(d[0].mcast_ifn));
+ d[0].syncid = ipvs->master_syncid;
}
- if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
+ if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
- strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
- d[1].syncid = ip_vs_backup_syncid;
+ strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+ sizeof(d[1].mcast_ifn));
+ d[1].syncid = ipvs->backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
ret = -EFAULT;
@@ -2542,6 +2621,7 @@ static struct genl_family ip_vs_genl_family = {
.name = IPVS_GENL_NAME,
.version = IPVS_GENL_VERSION,
.maxattr = IPVS_CMD_MAX,
+ .netnsok = true, /* Make ipvsadm work across netns */
};
/* Policy used for first-level command attributes */
@@ -2696,11 +2776,12 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
int idx = 0, i;
int start = cb->args[0];
struct ip_vs_service *svc;
+ struct net *net = skb_sknet(skb);
mutex_lock(&__ip_vs_mutex);
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
- if (++idx <= start)
+ if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
@@ -2711,7 +2792,7 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
- if (++idx <= start)
+ if (++idx <= start || !net_eq(svc->net, net))
continue;
if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
idx--;
@@ -2727,7 +2808,8 @@ nla_put_failure:
return skb->len;
}
-static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
+static int ip_vs_genl_parse_service(struct net *net,
+ struct ip_vs_service_user_kern *usvc,
struct nlattr *nla, int full_entry,
struct ip_vs_service **ret_svc)
{
@@ -2770,9 +2852,9 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
}
if (usvc->fwmark)
- svc = __ip_vs_svc_fwm_find(usvc->af, usvc->fwmark);
+ svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
else
- svc = __ip_vs_service_find(usvc->af, usvc->protocol,
+ svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
&usvc->addr, usvc->port);
*ret_svc = svc;
@@ -2809,13 +2891,14 @@ static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
return 0;
}
-static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
+static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
+ struct nlattr *nla)
{
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
int ret;
- ret = ip_vs_genl_parse_service(&usvc, nla, 0, &svc);
+ ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
return ret ? ERR_PTR(ret) : svc;
}
@@ -2883,6 +2966,7 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
+ struct net *net = skb_sknet(skb);
mutex_lock(&__ip_vs_mutex);
@@ -2891,7 +2975,8 @@ static int ip_vs_genl_dump_dests(struct sk_buff *skb,
IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
goto out_err;
- svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
+
+ svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc) || svc == NULL)
goto out_err;
@@ -3005,20 +3090,23 @@ nla_put_failure:
static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
struct netlink_callback *cb)
{
+ struct net *net = skb_net(skb);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
mutex_lock(&__ip_vs_mutex);
- if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
+ if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
- ip_vs_master_mcast_ifn,
- ip_vs_master_syncid, cb) < 0)
+ ipvs->master_mcast_ifn,
+ ipvs->master_syncid, cb) < 0)
goto nla_put_failure;
cb->args[0] = 1;
}
- if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
+ if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
- ip_vs_backup_mcast_ifn,
- ip_vs_backup_syncid, cb) < 0)
+ ipvs->backup_mcast_ifn,
+ ipvs->backup_syncid, cb) < 0)
goto nla_put_failure;
cb->args[1] = 1;
@@ -3030,31 +3118,33 @@ nla_put_failure:
return skb->len;
}
-static int ip_vs_genl_new_daemon(struct nlattr **attrs)
+static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
{
if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
return -EINVAL;
- return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
+ return start_sync_thread(net,
+ nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
}
-static int ip_vs_genl_del_daemon(struct nlattr **attrs)
+static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
{
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;
- return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+ return stop_sync_thread(net,
+ nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
}
-static int ip_vs_genl_set_config(struct nlattr **attrs)
+static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
{
struct ip_vs_timeout_user t;
- __ip_vs_get_timeouts(&t);
+ __ip_vs_get_timeouts(net, &t);
if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
@@ -3066,7 +3156,7 @@ static int ip_vs_genl_set_config(struct nlattr **attrs)
if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
- return ip_vs_set_timeout(&t);
+ return ip_vs_set_timeout(net, &t);
}
static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
@@ -3076,16 +3166,20 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
struct ip_vs_dest_user_kern udest;
int ret = 0, cmd;
int need_full_svc = 0, need_full_dest = 0;
+ struct net *net;
+ struct netns_ipvs *ipvs;
+ net = skb_sknet(skb);
+ ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
mutex_lock(&__ip_vs_mutex);
if (cmd == IPVS_CMD_FLUSH) {
- ret = ip_vs_flush();
+ ret = ip_vs_flush(net);
goto out;
} else if (cmd == IPVS_CMD_SET_CONFIG) {
- ret = ip_vs_genl_set_config(info->attrs);
+ ret = ip_vs_genl_set_config(net, info->attrs);
goto out;
} else if (cmd == IPVS_CMD_NEW_DAEMON ||
cmd == IPVS_CMD_DEL_DAEMON) {
@@ -3101,13 +3195,13 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
}
if (cmd == IPVS_CMD_NEW_DAEMON)
- ret = ip_vs_genl_new_daemon(daemon_attrs);
+ ret = ip_vs_genl_new_daemon(net, daemon_attrs);
else
- ret = ip_vs_genl_del_daemon(daemon_attrs);
+ ret = ip_vs_genl_del_daemon(net, daemon_attrs);
goto out;
} else if (cmd == IPVS_CMD_ZERO &&
!info->attrs[IPVS_CMD_ATTR_SERVICE]) {
- ret = ip_vs_zero_all();
+ ret = ip_vs_zero_all(net);
goto out;
}
@@ -3117,7 +3211,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
need_full_svc = 1;
- ret = ip_vs_genl_parse_service(&usvc,
+ ret = ip_vs_genl_parse_service(net, &usvc,
info->attrs[IPVS_CMD_ATTR_SERVICE],
need_full_svc, &svc);
if (ret)
@@ -3147,7 +3241,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
switch (cmd) {
case IPVS_CMD_NEW_SERVICE:
if (svc == NULL)
- ret = ip_vs_add_service(&usvc, &svc);
+ ret = ip_vs_add_service(net, &usvc, &svc);
else
ret = -EEXIST;
break;
@@ -3185,7 +3279,11 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg;
void *reply;
int ret, cmd, reply_cmd;
+ struct net *net;
+ struct netns_ipvs *ipvs;
+ net = skb_sknet(skb);
+ ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3214,7 +3312,8 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct ip_vs_service *svc;
- svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
+ svc = ip_vs_genl_find_service(net,
+ info->attrs[IPVS_CMD_ATTR_SERVICE]);
if (IS_ERR(svc)) {
ret = PTR_ERR(svc);
goto out_err;
@@ -3234,7 +3333,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
{
struct ip_vs_timeout_user t;
- __ip_vs_get_timeouts(&t);
+ __ip_vs_get_timeouts(net, &t);
#ifdef CONFIG_IP_VS_PROTO_TCP
NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
@@ -3380,62 +3479,173 @@ static void ip_vs_genl_unregister(void)
/* End of Generic Netlink interface definitions */
+/*
+ * per netns init/exit functions.
+ */
+int __net_init __ip_vs_control_init(struct net *net)
+{
+ int idx;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ctl_table *tbl;
+
+ atomic_set(&ipvs->dropentry, 0);
+ spin_lock_init(&ipvs->dropentry_lock);
+ spin_lock_init(&ipvs->droppacket_lock);
+ spin_lock_init(&ipvs->securetcp_lock);
+ ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+
+ /* Initialize rs_table */
+ for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
+ INIT_LIST_HEAD(&ipvs->rs_table[idx]);
+
+ INIT_LIST_HEAD(&ipvs->dest_trash);
+ atomic_set(&ipvs->ftpsvc_counter, 0);
+ atomic_set(&ipvs->nullsvc_counter, 0);
+
+ /* procfs stats */
+ ipvs->tot_stats = kzalloc(sizeof(struct ip_vs_stats), GFP_KERNEL);
+ if (ipvs->tot_stats == NULL) {
+ pr_err("%s(): no memory.\n", __func__);
+ return -ENOMEM;
+ }
+ ipvs->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+ if (!ipvs->cpustats) {
+ pr_err("%s() alloc_percpu failed\n", __func__);
+ goto err_alloc;
+ }
+ spin_lock_init(&ipvs->tot_stats->lock);
+
+ proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
+ proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
+ proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
+ &ip_vs_stats_percpu_fops);
+
+ if (!net_eq(net, &init_net)) {
+ tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_dup;
+ } else
+ tbl = vs_vars;
+ /* Initialize sysctl defaults */
+ idx = 0;
+ ipvs->sysctl_amemthresh = 1024;
+ tbl[idx++].data = &ipvs->sysctl_amemthresh;
+ ipvs->sysctl_am_droprate = 10;
+ tbl[idx++].data = &ipvs->sysctl_am_droprate;
+ tbl[idx++].data = &ipvs->sysctl_drop_entry;
+ tbl[idx++].data = &ipvs->sysctl_drop_packet;
+#ifdef CONFIG_IP_VS_NFCT
+ tbl[idx++].data = &ipvs->sysctl_conntrack;
+#endif
+ tbl[idx++].data = &ipvs->sysctl_secure_tcp;
+ ipvs->sysctl_snat_reroute = 1;
+ tbl[idx++].data = &ipvs->sysctl_snat_reroute;
+ ipvs->sysctl_sync_ver = 1;
+ tbl[idx++].data = &ipvs->sysctl_sync_ver;
+ tbl[idx++].data = &ipvs->sysctl_cache_bypass;
+ tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
+ tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
+ ipvs->sysctl_sync_threshold[0] = 3;
+ ipvs->sysctl_sync_threshold[1] = 50;
+ tbl[idx].data = &ipvs->sysctl_sync_threshold;
+ tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+ tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+
+#ifdef CONFIG_SYSCTL
+ ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
+ tbl);
+ if (ipvs->sysctl_hdr == NULL) {
+ if (!net_eq(net, &init_net))
+ kfree(tbl);
+ goto err_dup;
+ }
+#endif
+ ip_vs_new_estimator(net, ipvs->tot_stats);
+ ipvs->sysctl_tbl = tbl;
+ /* Schedule defense work */
+ INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
+ schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
+ return 0;
+
+err_dup:
+ free_percpu(ipvs->cpustats);
+err_alloc:
+ kfree(ipvs->tot_stats);
+ return -ENOMEM;
+}
+
+static void __net_exit __ip_vs_control_cleanup(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_trash_cleanup(net);
+ ip_vs_kill_estimator(net, ipvs->tot_stats);
+ cancel_delayed_work_sync(&ipvs->defense_work);
+ cancel_work_sync(&ipvs->defense_work.work);
+#ifdef CONFIG_SYSCTL
+ unregister_net_sysctl_table(ipvs->sysctl_hdr);
+#endif
+ proc_net_remove(net, "ip_vs_stats_percpu");
+ proc_net_remove(net, "ip_vs_stats");
+ proc_net_remove(net, "ip_vs");
+ free_percpu(ipvs->cpustats);
+ kfree(ipvs->tot_stats);
+}
+
+static struct pernet_operations ipvs_control_ops = {
+ .init = __ip_vs_control_init,
+ .exit = __ip_vs_control_cleanup,
+};
int __init ip_vs_control_init(void)
{
- int ret;
int idx;
+ int ret;
EnterFunction(2);
- /* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */
+ /* Initialize ip_vs_svc_table and ip_vs_svc_fwm_table */
for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
}
- for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++) {
- INIT_LIST_HEAD(&ip_vs_rtable[idx]);
+
+ ret = register_pernet_subsys(&ipvs_control_ops);
+ if (ret) {
+ pr_err("cannot register namespace.\n");
+ goto err;
}
- smp_wmb();
+
+ smp_wmb(); /* Do we really need it now? */
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
pr_err("cannot register sockopt.\n");
- return ret;
+ goto err_net;
}
ret = ip_vs_genl_register();
if (ret) {
pr_err("cannot register Generic Netlink interface.\n");
nf_unregister_sockopt(&ip_vs_sockopts);
- return ret;
+ goto err_net;
}
- proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
- proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
-
- sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars);
-
- ip_vs_new_estimator(&ip_vs_stats);
-
- /* Hook the defense timer */
- schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
-
LeaveFunction(2);
return 0;
+
+err_net:
+ unregister_pernet_subsys(&ipvs_control_ops);
+err:
+ return ret;
}
void ip_vs_control_cleanup(void)
{
EnterFunction(2);
- ip_vs_trash_cleanup();
- cancel_delayed_work_sync(&defense_work);
- cancel_work_sync(&defense_work.work);
- ip_vs_kill_estimator(&ip_vs_stats);
- unregister_sysctl_table(sysctl_header);
- proc_net_remove(&init_net, "ip_vs_stats");
- proc_net_remove(&init_net, "ip_vs");
+ unregister_pernet_subsys(&ipvs_control_ops);
ip_vs_genl_unregister();
nf_unregister_sockopt(&ip_vs_sockopts);
LeaveFunction(2);
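
The control-path conversion above follows the standard pernet pattern: allocate per-namespace state in .init, release it in .exit, and let register_pernet_subsys() run .init for every existing and future namespace. Below is a minimal, self-contained sketch of that pattern using the generic net_generic() slot mechanism; the demo_* identifiers are invented for illustration (IPVS itself reaches its state through net_ipvs(net)).

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int demo_net_id __read_mostly;           /* slot in net->gen */

struct demo_net {                               /* per-namespace state */
        int counter;
};

static int __net_init demo_net_init(struct net *net)
{
        struct demo_net *dn = net_generic(net, demo_net_id);

        dn->counter = 0;                        /* runs once per namespace */
        return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
        /* release whatever demo_net_init() set up */
}

static struct pernet_operations demo_net_ops = {
        .init = demo_net_init,
        .exit = demo_net_exit,
        .id   = &demo_net_id,
        .size = sizeof(struct demo_net),        /* allocated by the core */
};

static int __init demo_init(void)
{
        return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_exit(void)
{
        unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");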
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index ff28801..f560a05 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -8,8 +8,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Changes:
- *
+ * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
+ * Network name space (netns) aware.
+ * Global data moved to netns, i.e. struct netns_ipvs.
+ * Affected data: est_list and est_lock.
+ * estimation_timer() runs with a timer per netns.
+ * get_stats() does the per-cpu summing.
*/
#define KMSG_COMPONENT "IPVS"
@@ -48,11 +52,42 @@
*/
-static void estimation_timer(unsigned long arg);
+/*
+ * Sum the per-cpu counters into one summary
+ */
+static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
+ struct ip_vs_cpu_stats *stats)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
+ unsigned int start;
+ __u64 inbytes, outbytes;
+ if (i) {
+ sum->conns += s->ustats.conns;
+ sum->inpkts += s->ustats.inpkts;
+ sum->outpkts += s->ustats.outpkts;
+ do {
+ start = u64_stats_fetch_begin_bh(&s->syncp);
+ inbytes = s->ustats.inbytes;
+ outbytes = s->ustats.outbytes;
+ } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+ sum->inbytes += inbytes;
+ sum->outbytes += outbytes;
+ } else {
+ sum->conns = s->ustats.conns;
+ sum->inpkts = s->ustats.inpkts;
+ sum->outpkts = s->ustats.outpkts;
+ do {
+ start = u64_stats_fetch_begin_bh(&s->syncp);
+ sum->inbytes = s->ustats.inbytes;
+ sum->outbytes = s->ustats.outbytes;
+ } while (u64_stats_fetch_retry_bh(&s->syncp, start));
+ }
+ }
+}
-static LIST_HEAD(est_list);
-static DEFINE_SPINLOCK(est_lock);
-static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
static void estimation_timer(unsigned long arg)
{
@@ -62,11 +97,16 @@ static void estimation_timer(unsigned long arg)
u32 n_inpkts, n_outpkts;
u64 n_inbytes, n_outbytes;
u32 rate;
+ struct net *net = (struct net *)arg;
+ struct netns_ipvs *ipvs;
- spin_lock(&est_lock);
- list_for_each_entry(e, &est_list, list) {
+ ipvs = net_ipvs(net);
+ ip_vs_read_cpu_stats(&ipvs->tot_stats->ustats, ipvs->cpustats);
+ spin_lock(&ipvs->est_lock);
+ list_for_each_entry(e, &ipvs->est_list, list) {
s = container_of(e, struct ip_vs_stats, est);
+ ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
spin_lock(&s->lock);
n_conns = s->ustats.conns;
n_inpkts = s->ustats.inpkts;
@@ -75,38 +115,39 @@ static void estimation_timer(unsigned long arg)
n_outbytes = s->ustats.outbytes;
/* scaled by 2^10, but divided 2 seconds */
- rate = (n_conns - e->last_conns)<<9;
+ rate = (n_conns - e->last_conns) << 9;
e->last_conns = n_conns;
- e->cps += ((long)rate - (long)e->cps)>>2;
- s->ustats.cps = (e->cps+0x1FF)>>10;
+ e->cps += ((long)rate - (long)e->cps) >> 2;
+ s->ustats.cps = (e->cps + 0x1FF) >> 10;
- rate = (n_inpkts - e->last_inpkts)<<9;
+ rate = (n_inpkts - e->last_inpkts) << 9;
e->last_inpkts = n_inpkts;
- e->inpps += ((long)rate - (long)e->inpps)>>2;
- s->ustats.inpps = (e->inpps+0x1FF)>>10;
+ e->inpps += ((long)rate - (long)e->inpps) >> 2;
+ s->ustats.inpps = (e->inpps + 0x1FF) >> 10;
- rate = (n_outpkts - e->last_outpkts)<<9;
+ rate = (n_outpkts - e->last_outpkts) << 9;
e->last_outpkts = n_outpkts;
- e->outpps += ((long)rate - (long)e->outpps)>>2;
- s->ustats.outpps = (e->outpps+0x1FF)>>10;
+ e->outpps += ((long)rate - (long)e->outpps) >> 2;
+ s->ustats.outpps = (e->outpps + 0x1FF) >> 10;
- rate = (n_inbytes - e->last_inbytes)<<4;
+ rate = (n_inbytes - e->last_inbytes) << 4;
e->last_inbytes = n_inbytes;
- e->inbps += ((long)rate - (long)e->inbps)>>2;
- s->ustats.inbps = (e->inbps+0xF)>>5;
+ e->inbps += ((long)rate - (long)e->inbps) >> 2;
+ s->ustats.inbps = (e->inbps + 0xF) >> 5;
- rate = (n_outbytes - e->last_outbytes)<<4;
+ rate = (n_outbytes - e->last_outbytes) << 4;
e->last_outbytes = n_outbytes;
- e->outbps += ((long)rate - (long)e->outbps)>>2;
- s->ustats.outbps = (e->outbps+0xF)>>5;
+ e->outbps += ((long)rate - (long)e->outbps) >> 2;
+ s->ustats.outbps = (e->outbps + 0xF) >> 5;
spin_unlock(&s->lock);
}
- spin_unlock(&est_lock);
- mod_timer(&est_timer, jiffies + 2*HZ);
+ spin_unlock(&ipvs->est_lock);
+ mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
}
-void ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_estimator *est = &stats->est;
INIT_LIST_HEAD(&est->list);
@@ -126,18 +167,19 @@ void ip_vs_new_estimator(struct ip_vs_stats *stats)
est->last_outbytes = stats->ustats.outbytes;
est->outbps = stats->ustats.outbps<<5;
- spin_lock_bh(&est_lock);
- list_add(&est->list, &est_list);
- spin_unlock_bh(&est_lock);
+ spin_lock_bh(&ipvs->est_lock);
+ list_add(&est->list, &ipvs->est_list);
+ spin_unlock_bh(&ipvs->est_lock);
}
-void ip_vs_kill_estimator(struct ip_vs_stats *stats)
+void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_estimator *est = &stats->est;
- spin_lock_bh(&est_lock);
+ spin_lock_bh(&ipvs->est_lock);
list_del(&est->list);
- spin_unlock_bh(&est_lock);
+ spin_unlock_bh(&ipvs->est_lock);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
@@ -157,13 +199,35 @@ void ip_vs_zero_estimator(struct ip_vs_stats *stats)
est->outbps = 0;
}
-int __init ip_vs_estimator_init(void)
+static int __net_init __ip_vs_estimator_init(struct net *net)
{
- mod_timer(&est_timer, jiffies + 2 * HZ);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ INIT_LIST_HEAD(&ipvs->est_list);
+ spin_lock_init(&ipvs->est_lock);
+ setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)net);
+ mod_timer(&ipvs->est_timer, jiffies + 2 * HZ);
return 0;
}
+static void __net_exit __ip_vs_estimator_exit(struct net *net)
+{
+ del_timer_sync(&net_ipvs(net)->est_timer);
+}
+static struct pernet_operations ip_vs_est_ops = {
+ .init = __ip_vs_estimator_init,
+ .exit = __ip_vs_estimator_exit,
+};
+
+int __init ip_vs_estimator_init(void)
+{
+ return register_pernet_subsys(&ip_vs_est_ops);
+}
+
void ip_vs_estimator_cleanup(void)
{
- del_timer_sync(&est_timer);
+ unregister_pernet_subsys(&ip_vs_est_ops);
}
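
ip_vs_read_cpu_stats() above depends on the u64_stats seqcount so that 64-bit byte counters can be read consistently even on 32-bit hosts: the reader retries whenever a writer updated the counters mid-read. A condensed sketch of that writer/reader pairing, with an invented struct demo_cpu_stats standing in for ip_vs_cpu_stats:

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_cpu_stats {
        u64 inbytes;
        u64 outbytes;
        struct u64_stats_sync syncp;
};

/* writer side, e.g. in the per-cpu packet path */
static void demo_account(struct demo_cpu_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->inbytes += len;
        u64_stats_update_end(&s->syncp);
}

/* reader side: sum over all possible CPUs, retrying torn reads */
static void demo_sum(struct demo_cpu_stats __percpu *stats,
                     u64 *inbytes, u64 *outbytes)
{
        int cpu;

        *inbytes = *outbytes = 0;
        for_each_possible_cpu(cpu) {
                struct demo_cpu_stats *s = per_cpu_ptr(stats, cpu);
                unsigned int start;
                u64 in, out;

                do {
                        start = u64_stats_fetch_begin_bh(&s->syncp);
                        in  = s->inbytes;
                        out = s->outbytes;
                } while (u64_stats_fetch_retry_bh(&s->syncp, start));

                *inbytes  += in;
                *outbytes += out;
        }
}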
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 7545500..6b5dd6d 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -157,6 +157,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
int ret = 0;
enum ip_conntrack_info ctinfo;
struct nf_conn *ct;
+ struct net *net;
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
@@ -197,18 +198,20 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
*/
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(AF_INET, iph->protocol,
- &from, port, &cp->caddr, 0, &p);
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+ iph->protocol, &from, port,
+ &cp->caddr, 0, &p);
n_cp = ip_vs_conn_out_get(&p);
}
if (!n_cp) {
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(AF_INET, IPPROTO_TCP, &cp->caddr,
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp),
+ AF_INET, IPPROTO_TCP, &cp->caddr,
0, &cp->vaddr, port, &p);
n_cp = ip_vs_conn_new(&p, &from, port,
IP_VS_CONN_F_NO_CPORT |
IP_VS_CONN_F_NFCT,
- cp->dest);
+ cp->dest, skb->mark);
if (!n_cp)
return 0;
@@ -257,8 +260,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
* would be adjusted twice.
*/
+ net = skb_net(skb);
cp->app_data = NULL;
- ip_vs_tcp_conn_listen(n_cp);
+ ip_vs_tcp_conn_listen(net, n_cp);
ip_vs_conn_put(n_cp);
return ret;
}
@@ -287,6 +291,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
union nf_inet_addr to;
__be16 port;
struct ip_vs_conn *n_cp;
+ struct net *net;
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
@@ -358,14 +363,15 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
{
struct ip_vs_conn_param p;
- ip_vs_conn_fill_param(AF_INET, iph->protocol, &to, port,
- &cp->vaddr, htons(ntohs(cp->vport)-1),
- &p);
+ ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+ iph->protocol, &to, port, &cp->vaddr,
+ htons(ntohs(cp->vport)-1), &p);
n_cp = ip_vs_conn_in_get(&p);
if (!n_cp) {
n_cp = ip_vs_conn_new(&p, &cp->daddr,
htons(ntohs(cp->dport)-1),
- IP_VS_CONN_F_NFCT, cp->dest);
+ IP_VS_CONN_F_NFCT, cp->dest,
+ skb->mark);
if (!n_cp)
return 0;
@@ -377,7 +383,8 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
/*
* Move tunnel to listen state
*/
- ip_vs_tcp_conn_listen(n_cp);
+ net = skb_net(skb);
+ ip_vs_tcp_conn_listen(net, n_cp);
ip_vs_conn_put(n_cp);
return 1;
@@ -398,23 +405,22 @@ static struct ip_vs_app ip_vs_ftp = {
.pkt_in = ip_vs_ftp_in,
};
-
/*
- * ip_vs_ftp initialization
+ * per netns ip_vs_ftp initialization
*/
-static int __init ip_vs_ftp_init(void)
+static int __net_init __ip_vs_ftp_init(struct net *net)
{
int i, ret;
struct ip_vs_app *app = &ip_vs_ftp;
- ret = register_ip_vs_app(app);
+ ret = register_ip_vs_app(net, app);
if (ret)
return ret;
for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
if (!ports[i])
continue;
- ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
+ ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
if (ret)
break;
pr_info("%s: loaded support on port[%d] = %d\n",
@@ -422,18 +428,39 @@ static int __init ip_vs_ftp_init(void)
}
if (ret)
- unregister_ip_vs_app(app);
+ unregister_ip_vs_app(net, app);
return ret;
}
+/*
+ * netns exit
+ */
+static void __ip_vs_ftp_exit(struct net *net)
+{
+ struct ip_vs_app *app = &ip_vs_ftp;
+
+ unregister_ip_vs_app(net, app);
+}
+
+static struct pernet_operations ip_vs_ftp_ops = {
+ .init = __ip_vs_ftp_init,
+ .exit = __ip_vs_ftp_exit,
+};
+int __init ip_vs_ftp_init(void)
+{
+ return register_pernet_subsys(&ip_vs_ftp_ops);
+}
/*
* ip_vs_ftp finish.
*/
static void __exit ip_vs_ftp_exit(void)
{
- unregister_ip_vs_app(&ip_vs_ftp);
+ unregister_pernet_subsys(&ip_vs_ftp_ops);
}
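
With the ftp helper converted, every lookup has to name its namespace. The patch derives it from whatever object is at hand: the skb on the packet path (skb_net()), or the connection itself (ip_vs_conn_net()) when no skb applies. A trivial sketch of that choice; demo_pick_net is hypothetical:

#include <net/ip_vs.h>

static struct net *demo_pick_net(struct sk_buff *skb, struct ip_vs_conn *cp)
{
        /* packet path: the skb pins the namespace; otherwise the
         * connection remembered the netns it was created in */
        return skb ? skb_net(skb) : ip_vs_conn_net(cp);
}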
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 9323f89..00b5ffa 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -70,7 +70,6 @@
* entries that haven't been touched for a day.
*/
#define COUNT_FOR_FULL_EXPIRATION 30
-static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
/*
@@ -117,7 +116,7 @@ struct ip_vs_lblc_table {
static ctl_table vs_vars_table[] = {
{
.procname = "lblc_expiration",
- .data = &sysctl_ip_vs_lblc_expiration,
+ .data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -125,8 +124,6 @@ static ctl_table vs_vars_table[] = {
{ }
};
-static struct ctl_table_header * sysctl_header;
-
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
list_del(&en->list);
@@ -248,6 +245,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
struct ip_vs_lblc_entry *en, *nxt;
unsigned long now = jiffies;
int i, j;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK;
@@ -255,7 +253,8 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now,
- en->lastuse + sysctl_ip_vs_lblc_expiration))
+ en->lastuse +
+ ipvs->sysctl_lblc_expiration))
continue;
ip_vs_lblc_free(en);
@@ -543,23 +542,73 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
.schedule = ip_vs_lblc_schedule,
};
+/*
+ * per netns init.
+ */
+static int __net_init __ip_vs_lblc_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ if (!net_eq(net, &init_net)) {
+ ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
+ sizeof(vs_vars_table),
+ GFP_KERNEL);
+ if (ipvs->lblc_ctl_table == NULL)
+ return -ENOMEM;
+ } else
+ ipvs->lblc_ctl_table = vs_vars_table;
+ ipvs->sysctl_lblc_expiration = 24*60*60*HZ;
+ ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
+
+#ifdef CONFIG_SYSCTL
+ ipvs->lblc_ctl_header =
+ register_net_sysctl_table(net, net_vs_ctl_path,
+ ipvs->lblc_ctl_table);
+ if (!ipvs->lblc_ctl_header) {
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblc_ctl_table);
+ return -ENOMEM;
+ }
+#endif
+
+ return 0;
+}
+
+static void __net_exit __ip_vs_lblc_exit(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+ unregister_net_sysctl_table(ipvs->lblc_ctl_header);
+#endif
+
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblc_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblc_ops = {
+ .init = __ip_vs_lblc_init,
+ .exit = __ip_vs_lblc_exit,
+};
static int __init ip_vs_lblc_init(void)
{
int ret;
- sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+ ret = register_pernet_subsys(&ip_vs_lblc_ops);
+ if (ret)
+ return ret;
+
ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
if (ret)
- unregister_sysctl_table(sysctl_header);
+ unregister_pernet_subsys(&ip_vs_lblc_ops);
return ret;
}
-
static void __exit ip_vs_lblc_cleanup(void)
{
- unregister_sysctl_table(sysctl_header);
unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+ unregister_pernet_subsys(&ip_vs_lblc_ops);
}
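
The lblc conversion shows the usual recipe for per-namespace sysctls: init_net keeps the static table, every other namespace gets a kmemdup() copy whose .data pointers are re-aimed at fields of the per-netns struct. A sketch under those assumptions; the demo_* names, the struct demo_net container, and the "net/demo" sysctl path are all invented:

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

struct demo_net {                       /* hypothetical per-netns state */
        int expiration;
        struct ctl_table *tbl;
        struct ctl_table_header *hdr;
};

static struct ctl_table demo_vars[] = {
        {
                .procname     = "demo_expiration",  /* illustrative name */
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = proc_dointvec_jiffies,
        },
        { }
};

static struct ctl_path demo_ctl_path[] = {
        { .procname = "net" },
        { .procname = "demo" },         /* invented sysctl directory */
        { }
};

static int demo_sysctl_init(struct net *net, struct demo_net *dn)
{
        struct ctl_table *tbl;

        if (!net_eq(net, &init_net)) {
                /* non-init namespaces get their own writable copy */
                tbl = kmemdup(demo_vars, sizeof(demo_vars), GFP_KERNEL);
                if (!tbl)
                        return -ENOMEM;
        } else
                tbl = demo_vars;

        dn->expiration = 24 * 60 * 60 * HZ;     /* default: one day */
        tbl[0].data = &dn->expiration;          /* bind to this netns */

        dn->hdr = register_net_sysctl_table(net, demo_ctl_path, tbl);
        if (!dn->hdr) {
                if (!net_eq(net, &init_net))
                        kfree(tbl);
                return -ENOMEM;
        }
        dn->tbl = tbl;
        return 0;
}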
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index dbeed8e..bfa25f1 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -70,8 +70,6 @@
* entries that haven't been touched for a day.
*/
#define COUNT_FOR_FULL_EXPIRATION 30
-static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
-
/*
* for IPVS lblcr entry hash table
@@ -296,7 +294,7 @@ struct ip_vs_lblcr_table {
static ctl_table vs_vars_table[] = {
{
.procname = "lblcr_expiration",
- .data = &sysctl_ip_vs_lblcr_expiration,
+ .data = NULL,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -304,8 +302,6 @@ static ctl_table vs_vars_table[] = {
{ }
};
-static struct ctl_table_header * sysctl_header;
-
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
list_del(&en->list);
@@ -425,14 +421,15 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
unsigned long now = jiffies;
int i, j;
struct ip_vs_lblcr_entry *en, *nxt;
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
- if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
- now))
+ if (time_after(en->lastuse
+ + ipvs->sysctl_lblcr_expiration, now))
continue;
ip_vs_lblcr_free(en);
@@ -664,6 +661,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
read_lock(&svc->sched_lock);
en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
if (en) {
+ struct netns_ipvs *ipvs = net_ipvs(svc->net);
/* We only hold a read lock, but this is atomic */
en->lastuse = jiffies;
@@ -675,7 +673,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/* More than one destination + enough time passed by, cleanup */
if (atomic_read(&en->set.size) > 1 &&
time_after(jiffies, en->set.lastmod +
- sysctl_ip_vs_lblcr_expiration)) {
+ ipvs->sysctl_lblcr_expiration)) {
struct ip_vs_dest *m;
write_lock(&en->set.lock);
@@ -744,23 +742,73 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
.schedule = ip_vs_lblcr_schedule,
};
+/*
+ * per netns init.
+ */
+static int __net_init __ip_vs_lblcr_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ if (!net_eq(net, &init_net)) {
+ ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
+ sizeof(vs_vars_table),
+ GFP_KERNEL);
+ if (ipvs->lblcr_ctl_table == NULL)
+ return -ENOMEM;
+ } else
+ ipvs->lblcr_ctl_table = vs_vars_table;
+ ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
+ ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
+
+#ifdef CONFIG_SYSCTL
+ ipvs->lblcr_ctl_header =
+ register_net_sysctl_table(net, net_vs_ctl_path,
+ ipvs->lblcr_ctl_table);
+ if (!ipvs->lblcr_ctl_header) {
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblcr_ctl_table);
+ return -ENOMEM;
+ }
+#endif
+
+ return 0;
+}
+
+static void __net_exit __ip_vs_lblcr_exit(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+ unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
+#endif
+
+ if (!net_eq(net, &init_net))
+ kfree(ipvs->lblcr_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblcr_ops = {
+ .init = __ip_vs_lblcr_init,
+ .exit = __ip_vs_lblcr_exit,
+};
static int __init ip_vs_lblcr_init(void)
{
int ret;
- sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+ ret = register_pernet_subsys(&ip_vs_lblcr_ops);
+ if (ret)
+ return ret;
+
ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
if (ret)
- unregister_sysctl_table(sysctl_header);
+ unregister_pernet_subsys(&ip_vs_lblcr_ops);
return ret;
}
-
static void __exit ip_vs_lblcr_cleanup(void)
{
- unregister_sysctl_table(sysctl_header);
unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+ unregister_pernet_subsys(&ip_vs_lblcr_ops);
}
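
The lblcr sweep above tests expiry with time_after() rather than comparing jiffies values directly, which stays correct across jiffies wraparound. A minimal illustration (demo_expired is hypothetical):

#include <linux/jiffies.h>
#include <linux/types.h>

/* wraparound-safe: never compare jiffies values with a plain '<' */
static bool demo_expired(unsigned long lastuse, unsigned long expire)
{
        return time_after(jiffies, lastuse + expire);
}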
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 4680647..f454c80 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -141,6 +141,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
struct nf_conntrack_tuple *orig, new_reply;
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
+ struct net *net = nf_ct_net(ct);
if (exp->tuple.src.l3num != PF_INET)
return;
@@ -155,7 +156,7 @@ static void ip_vs_nfct_expect_callback(struct nf_conn *ct,
/* RS->CLIENT */
orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
- ip_vs_conn_fill_param(exp->tuple.src.l3num, orig->dst.protonum,
+ ip_vs_conn_fill_param(net, exp->tuple.src.l3num, orig->dst.protonum,
&orig->src.u3, orig->src.u.tcp.port,
&orig->dst.u3, orig->dst.u.tcp.port, &p);
cp = ip_vs_conn_out_get(&p);
@@ -268,7 +269,8 @@ void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
" for conn " FMT_CONN "\n",
__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
- h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple);
+ h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+ &tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
/* Show what happens instead of calling nf_ct_kill() */
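
Both nfct fixes resolve the namespace from the object in hand, nf_ct_net(ct) in the expectation callback and ip_vs_conn_net(cp) on the drop path, instead of assuming init_net. Roughly, with a hypothetical demo_find_ct wrapper:

#include <net/ip_vs.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_zones.h>

static struct nf_conntrack_tuple_hash *
demo_find_ct(struct ip_vs_conn *cp, struct nf_conntrack_tuple *tuple)
{
        struct net *net = ip_vs_conn_net(cp);   /* the conn's own netns */

        return nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE, tuple);
}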
diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c
index 3414af7..5cf859c 100644
--- a/net/netfilter/ipvs/ip_vs_pe.c
+++ b/net/netfilter/ipvs/ip_vs_pe.c
@@ -29,12 +29,11 @@ void ip_vs_unbind_pe(struct ip_vs_service *svc)
}
/* Get pe in the pe list by name */
-static struct ip_vs_pe *
-ip_vs_pe_getbyname(const char *pe_name)
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
{
struct ip_vs_pe *pe;
- IP_VS_DBG(2, "%s(): pe_name \"%s\"\n", __func__,
+ IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__,
pe_name);
spin_lock_bh(&ip_vs_pe_lock);
@@ -60,28 +59,22 @@ ip_vs_pe_getbyname(const char *pe_name)
}
/* Lookup pe and try to load it if it doesn't exist */
-struct ip_vs_pe *ip_vs_pe_get(const char *name)
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name)
{
struct ip_vs_pe *pe;
/* Search for the pe by name */
- pe = ip_vs_pe_getbyname(name);
+ pe = __ip_vs_pe_getbyname(name);
/* If pe not found, load the module and search again */
if (!pe) {
request_module("ip_vs_pe_%s", name);
- pe = ip_vs_pe_getbyname(name);
+ pe = __ip_vs_pe_getbyname(name);
}
return pe;
}
-void ip_vs_pe_put(struct ip_vs_pe *pe)
-{
- if (pe && pe->module)
- module_put(pe->module);
-}
-
/* Register a pe in the pe list */
int register_ip_vs_pe(struct ip_vs_pe *pe)
{
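
ip_vs_pe_getbyname() above keeps the classic lookup-or-load idiom: search the registered list, ask modprobe for the module on a miss, then search once more. A sketch with the list walk stubbed out; struct demo_pe and demo_find are placeholders:

#include <linux/kmod.h>

struct demo_pe {                        /* placeholder type */
        const char *name;
};

/* stand-in for the real locked list walk */
static struct demo_pe *demo_find(const char *name)
{
        return NULL;
}

static struct demo_pe *demo_pe_get(const char *name)
{
        struct demo_pe *pe = demo_find(name);

        if (!pe) {
                /* miss: ask modprobe to load it, then retry once */
                request_module("ip_vs_pe_%s", name);
                pe = demo_find(name);
        }
        return pe;
}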
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index b8b4e96..0d83bc0 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -71,6 +71,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
struct ip_vs_iphdr iph;
unsigned int dataoff, datalen, matchoff, matchlen;
const char *dptr;
+ int retc;
ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph);
@@ -83,6 +84,8 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
if (dataoff >= skb->len)
return -EINVAL;
+ if ((retc = skb_linearize(skb)) < 0)
+ return retc;
dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
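
The skb_linearize() call added to the SIP pe matters because the helper parses skb->data directly; on a paged skb only the head is linear, so the data must be pulled into one contiguous buffer first. Approximately (demo_parse is hypothetical):

#include <linux/skbuff.h>

static int demo_parse(struct sk_buff *skb, unsigned int dataoff)
{
        int err = skb_linearize(skb);   /* pull paged data into the head */

        if (err < 0)
                return err;             /* typically -ENOMEM */
        /* skb->data now covers the full skb->len, safe to index */
        return 0;
}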
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index c539983..17484a4 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -60,6 +60,35 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
return 0;
}
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
+ defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
+ defined(CONFIG_IP_VS_PROTO_ESP)
+/*
+ * register an ipvs protocol's netns-related data
+ */
+static int
+register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+ struct ip_vs_proto_data *pd =
+ kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
+
+ if (!pd) {
+ pr_err("%s(): no memory.\n", __func__);
+ return -ENOMEM;
+ }
+ pd->pp = pp; /* cached for fast access */
+ pd->next = ipvs->proto_data_table[hash];
+ ipvs->proto_data_table[hash] = pd;
+ atomic_set(&pd->appcnt, 0); /* Init app counter */
+
+ if (pp->init_netns != NULL)
+ pp->init_netns(net, pd);
+
+ return 0;
+}
+#endif
/*
* unregister an ipvs protocol
@@ -82,6 +111,29 @@ static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
return -ESRCH;
}
+/*
+ * unregister an ipvs protocol's netns data
+ */
+static int
+unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data **pd_p;
+ unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);
+
+ pd_p = &ipvs->proto_data_table[hash];
+ for (; *pd_p; pd_p = &(*pd_p)->next) {
+ if (*pd_p == pd) {
+ *pd_p = pd->next;
+ if (pd->pp->exit_netns != NULL)
+ pd->pp->exit_netns(net, pd);
+ kfree(pd);
+ return 0;
+ }
+ }
+
+ return -ESRCH;
+}
/*
* get ip_vs_protocol object by its proto.
@@ -100,19 +152,44 @@ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
}
EXPORT_SYMBOL(ip_vs_proto_get);
+/*
+ * get ip_vs_protocol object data by netns and proto
+ */
+struct ip_vs_proto_data *
+__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
+{
+ struct ip_vs_proto_data *pd;
+ unsigned hash = IP_VS_PROTO_HASH(proto);
+
+ for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
+ if (pd->pp->protocol == proto)
+ return pd;
+ }
+
+ return NULL;
+}
+
+struct ip_vs_proto_data *
+ip_vs_proto_data_get(struct net *net, unsigned short proto)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ return __ipvs_proto_data_get(ipvs, proto);
+}
+EXPORT_SYMBOL(ip_vs_proto_data_get);
/*
* Propagate event for state change to all protocols
*/
-void ip_vs_protocol_timeout_change(int flags)
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
{
- struct ip_vs_protocol *pp;
+ struct ip_vs_proto_data *pd;
int i;
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
- for (pp = ip_vs_proto_table[i]; pp; pp = pp->next) {
- if (pp->timeout_change)
- pp->timeout_change(pp, flags);
+ for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) {
+ if (pd->pp->timeout_change)
+ pd->pp->timeout_change(pd, flags);
}
}
}
@@ -236,6 +313,46 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
}
+/*
+ * per network name-space init
+ */
+static int __net_init __ip_vs_protocol_init(struct net *net)
+{
+#ifdef CONFIG_IP_VS_PROTO_TCP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_AH
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_ESP
+ register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+#endif
+ return 0;
+}
+
+static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd;
+ int i;
+
+ /* unregister all the ipvs proto data for this netns */
+ for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
+ while ((pd = ipvs->proto_data_table[i]) != NULL)
+ unregister_ip_vs_proto_netns(net, pd);
+ }
+}
+
+static struct pernet_operations ipvs_proto_ops = {
+ .init = __ip_vs_protocol_init,
+ .exit = __ip_vs_protocol_cleanup,
+};
int __init ip_vs_protocol_init(void)
{
@@ -265,6 +382,7 @@ int __init ip_vs_protocol_init(void)
REGISTER_PROTOCOL(&ip_vs_protocol_esp);
#endif
pr_info("Registered protocols (%s)\n", &protocols[2]);
+ return register_pernet_subsys(&ipvs_proto_ops);
- return 0;
}
@@ -275,6 +393,7 @@ void ip_vs_protocol_cleanup(void)
struct ip_vs_protocol *pp;
int i;
+ unregister_pernet_subsys(&ipvs_proto_ops);
/* unregister all the ipvs protocols */
for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
while ((pp = ip_vs_proto_table[i]) != NULL)
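
The per-netns proto_data table above is a small open hash of singly linked chains; unregistering walks the chain with a pointer-to-pointer, so no prev pointer is needed. The same structure reduced to its essentials, with invented demo_* names and table size:

#include <linux/errno.h>
#include <linux/kernel.h>

struct demo_pd {                        /* per-protocol, per-netns data */
        int proto;
        struct demo_pd *next;
};

#define DEMO_TAB_SIZE   32              /* illustrative; IPVS uses its own */
#define demo_hash(p)    ((p) & (DEMO_TAB_SIZE - 1))

static void demo_register(struct demo_pd **tab, struct demo_pd *pd)
{
        unsigned int hash = demo_hash(pd->proto);

        pd->next = tab[hash];           /* push onto the chain head */
        tab[hash] = pd;
}

static int demo_unregister(struct demo_pd **tab, struct demo_pd *pd)
{
        struct demo_pd **pd_p = &tab[demo_hash(pd->proto)];

        for (; *pd_p; pd_p = &(*pd_p)->next) {
                if (*pd_p == pd) {
                        *pd_p = pd->next;       /* unlink, no prev needed */
                        return 0;
                }
        }
        return -ESRCH;
}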
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 3a04611..5b8eb8b 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -41,28 +41,30 @@ struct isakmp_hdr {
#define PORT_ISAKMP 500
static void
-ah_esp_conn_fill_param_proto(int af, const struct ip_vs_iphdr *iph,
- int inverse, struct ip_vs_conn_param *p)
+ah_esp_conn_fill_param_proto(struct net *net, int af,
+ const struct ip_vs_iphdr *iph, int inverse,
+ struct ip_vs_conn_param *p)
{
if (likely(!inverse))
- ip_vs_conn_fill_param(af, IPPROTO_UDP,
+ ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
&iph->saddr, htons(PORT_ISAKMP),
&iph->daddr, htons(PORT_ISAKMP), p);
else
- ip_vs_conn_fill_param(af, IPPROTO_UDP,
+ ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
&iph->daddr, htons(PORT_ISAKMP),
&iph->saddr, htons(PORT_ISAKMP), p);
}
static struct ip_vs_conn *
-ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_in_get(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph, unsigned int proto_off,
int inverse)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
+ struct net *net = skb_net(skb);
- ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+ ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
cp = ip_vs_conn_in_get(&p);
if (!cp) {
/*
@@ -72,7 +74,7 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
"%s%s %s->%s\n",
inverse ? "ICMP+" : "",
- pp->name,
+ ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
@@ -83,21 +85,21 @@ ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
static struct ip_vs_conn *
ah_esp_conn_out_get(int af, const struct sk_buff *skb,
- struct ip_vs_protocol *pp,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
+ struct net *net = skb_net(skb);
- ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+ ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
cp = ip_vs_conn_out_get(&p);
if (!cp) {
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
"%s%s %s->%s\n",
inverse ? "ICMP+" : "",
- pp->name,
+ ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
@@ -107,7 +109,7 @@ ah_esp_conn_out_get(int af, const struct sk_buff *skb,
static int
-ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
/*
@@ -117,26 +119,14 @@ ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
return 0;
}
-static void ah_esp_init(struct ip_vs_protocol *pp)
-{
- /* nothing to do now */
-}
-
-
-static void ah_esp_exit(struct ip_vs_protocol *pp)
-{
- /* nothing to do now */
-}
-
-
#ifdef CONFIG_IP_VS_PROTO_AH
struct ip_vs_protocol ip_vs_protocol_ah = {
.name = "AH",
.protocol = IPPROTO_AH,
.num_states = 1,
.dont_defrag = 1,
- .init = ah_esp_init,
- .exit = ah_esp_exit,
+ .init = NULL,
+ .exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
@@ -149,7 +139,6 @@ struct ip_vs_protocol ip_vs_protocol_ah = {
.app_conn_bind = NULL,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL, /* ISAKMP */
- .set_state_timeout = NULL,
};
#endif
@@ -159,8 +148,8 @@ struct ip_vs_protocol ip_vs_protocol_esp = {
.protocol = IPPROTO_ESP,
.num_states = 1,
.dont_defrag = 1,
- .init = ah_esp_init,
- .exit = ah_esp_exit,
+ .init = NULL,
+ .exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1ea96bcd..b027ccc 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -9,9 +9,10 @@
#include <net/ip_vs.h>
static int
-sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
+ struct net *net;
struct ip_vs_service *svc;
sctp_chunkhdr_t _schunkh, *sch;
sctp_sctphdr_t *sh, _sctph;
@@ -27,13 +28,13 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
sizeof(_schunkh), &_schunkh);
if (sch == NULL)
return 0;
-
+ net = skb_net(skb);
if ((sch->type == SCTP_CID_INIT) &&
- (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+ (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
&iph.daddr, sh->dest))) {
int ignored;
- if (ip_vs_todrop()) {
+ if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
@@ -46,14 +47,19 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
- *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
- if (!*cpp && !ignored) {
- *verdict = ip_vs_leave(svc, skb, pp);
+ *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+ if (!*cpp && ignored <= 0) {
+ if (!ignored)
+ *verdict = ip_vs_leave(svc, skb, pd);
+ else {
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ }
return 0;
}
ip_vs_service_put(svc);
}
-
+ /* NF_ACCEPT */
return 1;
}
@@ -856,7 +862,7 @@ static struct ipvs_sctp_nextstate
/*
* Timeout table[state]
*/
-static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
+static const int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
[IP_VS_SCTP_S_NONE] = 2 * HZ,
[IP_VS_SCTP_S_INIT_CLI] = 1 * 60 * HZ,
[IP_VS_SCTP_S_INIT_SER] = 1 * 60 * HZ,
@@ -900,20 +906,8 @@ static const char *sctp_state_name(int state)
return "?";
}
-static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
-{
-}
-
-static int
-sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-
-return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
- sctp_state_name_table, sname, to);
-}
-
static inline int
-set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, const struct sk_buff *skb)
{
sctp_chunkhdr_t _sctpch, *sch;
@@ -971,7 +965,7 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
IP_VS_DBG_BUF(8, "%s %s %s:%d->"
"%s:%d state: %s->%s conn->refcnt:%d\n",
- pp->name,
+ pd->pp->name,
((direction == IP_VS_DIR_OUTPUT) ?
"output " : "input "),
IP_VS_DBG_ADDR(cp->af, &cp->daddr),
@@ -995,75 +989,73 @@ set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
}
}
}
+ if (likely(pd))
+ cp->timeout = pd->timeout_table[cp->state = next_state];
+ else /* What to do ? */
+ cp->timeout = sctp_timeouts[cp->state = next_state];
- cp->timeout = pp->timeout_table[cp->state = next_state];
-
- return 1;
+ return 1;
}
static int
sctp_state_transition(struct ip_vs_conn *cp, int direction,
- const struct sk_buff *skb, struct ip_vs_protocol *pp)
+ const struct sk_buff *skb, struct ip_vs_proto_data *pd)
{
int ret = 0;
spin_lock(&cp->lock);
- ret = set_sctp_state(pp, cp, direction, skb);
+ ret = set_sctp_state(pd, cp, direction, skb);
spin_unlock(&cp->lock);
return ret;
}
-/*
- * Hash table for SCTP application incarnations
- */
-#define SCTP_APP_TAB_BITS 4
-#define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
-#define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
-
-static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(sctp_app_lock);
-
static inline __u16 sctp_app_hashkey(__be16 port)
{
return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
& SCTP_APP_TAB_MASK;
}
-static int sctp_register_app(struct ip_vs_app *inc)
+static int sctp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
hash = sctp_app_hashkey(port);
- spin_lock_bh(&sctp_app_lock);
- list_for_each_entry(i, &sctp_apps[hash], p_list) {
+ spin_lock_bh(&ipvs->sctp_app_lock);
+ list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
- list_add(&inc->p_list, &sctp_apps[hash]);
- atomic_inc(&ip_vs_protocol_sctp.appcnt);
+ list_add(&inc->p_list, &ipvs->sctp_apps[hash]);
+ atomic_inc(&pd->appcnt);
out:
- spin_unlock_bh(&sctp_app_lock);
+ spin_unlock_bh(&ipvs->sctp_app_lock);
return ret;
}
-static void sctp_unregister_app(struct ip_vs_app *inc)
+static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
- spin_lock_bh(&sctp_app_lock);
- atomic_dec(&ip_vs_protocol_sctp.appcnt);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+
+ spin_lock_bh(&ipvs->sctp_app_lock);
+ atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
- spin_unlock_bh(&sctp_app_lock);
+ spin_unlock_bh(&ipvs->sctp_app_lock);
}
static int sctp_app_conn_bind(struct ip_vs_conn *cp)
{
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
@@ -1074,12 +1066,12 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
/* Lookup application incarnations and bind the right one */
hash = sctp_app_hashkey(cp->vport);
- spin_lock(&sctp_app_lock);
- list_for_each_entry(inc, &sctp_apps[hash], p_list) {
+ spin_lock(&ipvs->sctp_app_lock);
+ list_for_each_entry(inc, &ipvs->sctp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
- spin_unlock(&sctp_app_lock);
+ spin_unlock(&ipvs->sctp_app_lock);
IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
@@ -1095,43 +1087,50 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
goto out;
}
}
- spin_unlock(&sctp_app_lock);
+ spin_unlock(&ipvs->sctp_app_lock);
out:
return result;
}
-static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ * timeouts are netns-related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
{
- IP_VS_INIT_HASH_TABLE(sctp_apps);
- pp->timeout_table = sctp_timeouts;
-}
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
+ spin_lock_init(&ipvs->sctp_app_lock);
+ pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
+ sizeof(sctp_timeouts));
+}
-static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
-
+ kfree(pd->timeout_table);
}
struct ip_vs_protocol ip_vs_protocol_sctp = {
- .name = "SCTP",
- .protocol = IPPROTO_SCTP,
- .num_states = IP_VS_SCTP_S_LAST,
- .dont_defrag = 0,
- .appcnt = ATOMIC_INIT(0),
- .init = ip_vs_sctp_init,
- .exit = ip_vs_sctp_exit,
- .register_app = sctp_register_app,
+ .name = "SCTP",
+ .protocol = IPPROTO_SCTP,
+ .num_states = IP_VS_SCTP_S_LAST,
+ .dont_defrag = 0,
+ .init = NULL,
+ .exit = NULL,
+ .init_netns = __ip_vs_sctp_init,
+ .exit_netns = __ip_vs_sctp_exit,
+ .register_app = sctp_register_app,
.unregister_app = sctp_unregister_app,
- .conn_schedule = sctp_conn_schedule,
- .conn_in_get = ip_vs_conn_in_get_proto,
- .conn_out_get = ip_vs_conn_out_get_proto,
- .snat_handler = sctp_snat_handler,
- .dnat_handler = sctp_dnat_handler,
- .csum_check = sctp_csum_check,
- .state_name = sctp_state_name,
+ .conn_schedule = sctp_conn_schedule,
+ .conn_in_get = ip_vs_conn_in_get_proto,
+ .conn_out_get = ip_vs_conn_out_get_proto,
+ .snat_handler = sctp_snat_handler,
+ .dnat_handler = sctp_dnat_handler,
+ .csum_check = sctp_csum_check,
+ .state_name = sctp_state_name,
.state_transition = sctp_state_transition,
- .app_conn_bind = sctp_app_conn_bind,
- .debug_packet = ip_vs_tcpudp_debug_packet,
- .timeout_change = sctp_timeout_change,
- .set_state_timeout = sctp_set_state_timeout,
+ .app_conn_bind = sctp_app_conn_bind,
+ .debug_packet = ip_vs_tcpudp_debug_packet,
+ .timeout_change = NULL,
};
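
The conversion above leans on ip_vs_proto_data_get() to reach the per-netns
protocol state. A minimal sketch of that lookup, assuming the per-netns
proto_data_table hash, the IP_VS_PROTO_HASH() macro and a singly linked
hash chain (pd->next) introduced elsewhere in this series, not in this diff:

	struct ip_vs_proto_data *
	ip_vs_proto_data_get(struct net *net, unsigned short proto)
	{
		struct netns_ipvs *ipvs = net_ipvs(net);
		struct ip_vs_proto_data *pd;

		/* Walk the per-netns hash chain for this protocol number */
		for (pd = ipvs->proto_data_table[IP_VS_PROTO_HASH(proto)];
		     pd; pd = pd->next) {
			if (pd->pp->protocol == proto)
				return pd;
		}
		return NULL;
	}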
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index f6c5200..c0cc341 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -9,8 +9,12 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Changes:
+ * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
*
+ * Network name space (netns) aware.
+ * Global data moved to netns i.e struct netns_ipvs
+ *		tcp_timeouts table has a copy per netns, kept in the
+ *		per-protocol ip_vs_proto_data hash table, and is handled per netns
*/
#define KMSG_COMPONENT "IPVS"
@@ -28,9 +32,10 @@
#include <net/ip_vs.h>
static int
-tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
+ struct net *net;
struct ip_vs_service *svc;
struct tcphdr _tcph, *th;
struct ip_vs_iphdr iph;
@@ -42,14 +47,14 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
*verdict = NF_DROP;
return 0;
}
-
+ net = skb_net(skb);
/* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
if (th->syn &&
- (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr,
- th->dest))) {
+ (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
+ &iph.daddr, th->dest))) {
int ignored;
- if (ip_vs_todrop()) {
+ if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
@@ -63,13 +68,19 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
- *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
- if (!*cpp && !ignored) {
- *verdict = ip_vs_leave(svc, skb, pp);
+ *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+ if (!*cpp && ignored <= 0) {
+ if (!ignored)
+ *verdict = ip_vs_leave(svc, skb, pd);
+ else {
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ }
return 0;
}
ip_vs_service_put(svc);
}
+ /* NF_ACCEPT */
return 1;
}
@@ -338,7 +349,7 @@ static const int tcp_state_off[IP_VS_DIR_LAST] = {
/*
* Timeout table[state]
*/
-static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
+static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_NONE] = 2*HZ,
[IP_VS_TCP_S_ESTABLISHED] = 15*60*HZ,
[IP_VS_TCP_S_SYN_SENT] = 2*60*HZ,
@@ -437,10 +448,7 @@ static struct tcp_states_t tcp_states_dos [] = {
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
-static struct tcp_states_t *tcp_state_table = tcp_states;
-
-
-static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
+static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
{
int on = (flags & 1); /* secure_tcp */
@@ -450,14 +458,7 @@ static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
** for most if not for all of the applications. Something
** like "capabilities" (flags) for each object.
*/
- tcp_state_table = (on? tcp_states_dos : tcp_states);
-}
-
-static int
-tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
- return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST,
- tcp_state_name_table, sname, to);
+ pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
}
static inline int tcp_state_idx(struct tcphdr *th)
@@ -474,7 +475,7 @@ static inline int tcp_state_idx(struct tcphdr *th)
}
static inline void
-set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, struct tcphdr *th)
{
int state_idx;
@@ -497,7 +498,8 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
goto tcp_state_out;
}
- new_state = tcp_state_table[state_off+state_idx].next_state[cp->state];
+ new_state =
+ pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
tcp_state_out:
if (new_state != cp->state) {
@@ -505,7 +507,7 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
"%s:%d state: %s->%s conn->refcnt:%d\n",
- pp->name,
+ pd->pp->name,
((state_off == TCP_DIR_OUTPUT) ?
"output " : "input "),
th->syn ? 'S' : '.',
@@ -535,17 +537,19 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
}
}
- cp->timeout = pp->timeout_table[cp->state = new_state];
+ if (likely(pd))
+ cp->timeout = pd->timeout_table[cp->state = new_state];
+ else /* What to do ? */
+ cp->timeout = tcp_timeouts[cp->state = new_state];
}
-
/*
* Handle state transitions
*/
static int
tcp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
struct tcphdr _tcph, *th;
@@ -560,23 +564,12 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
return 0;
spin_lock(&cp->lock);
- set_tcp_state(pp, cp, direction, th);
+ set_tcp_state(pd, cp, direction, th);
spin_unlock(&cp->lock);
return 1;
}
-
-/*
- * Hash table for TCP application incarnations
- */
-#define TCP_APP_TAB_BITS 4
-#define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
-#define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
-
-static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(tcp_app_lock);
-
static inline __u16 tcp_app_hashkey(__be16 port)
{
return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
@@ -584,44 +577,50 @@ static inline __u16 tcp_app_hashkey(__be16 port)
}
-static int tcp_register_app(struct ip_vs_app *inc)
+static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
hash = tcp_app_hashkey(port);
- spin_lock_bh(&tcp_app_lock);
- list_for_each_entry(i, &tcp_apps[hash], p_list) {
+ spin_lock_bh(&ipvs->tcp_app_lock);
+ list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
- list_add(&inc->p_list, &tcp_apps[hash]);
- atomic_inc(&ip_vs_protocol_tcp.appcnt);
+ list_add(&inc->p_list, &ipvs->tcp_apps[hash]);
+ atomic_inc(&pd->appcnt);
out:
- spin_unlock_bh(&tcp_app_lock);
+ spin_unlock_bh(&ipvs->tcp_app_lock);
return ret;
}
static void
-tcp_unregister_app(struct ip_vs_app *inc)
+tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
- spin_lock_bh(&tcp_app_lock);
- atomic_dec(&ip_vs_protocol_tcp.appcnt);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
+ spin_lock_bh(&ipvs->tcp_app_lock);
+ atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
- spin_unlock_bh(&tcp_app_lock);
+ spin_unlock_bh(&ipvs->tcp_app_lock);
}
static int
tcp_app_conn_bind(struct ip_vs_conn *cp)
{
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
@@ -633,12 +632,12 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
/* Lookup application incarnations and bind the right one */
hash = tcp_app_hashkey(cp->vport);
- spin_lock(&tcp_app_lock);
- list_for_each_entry(inc, &tcp_apps[hash], p_list) {
+ spin_lock(&ipvs->tcp_app_lock);
+ list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
- spin_unlock(&tcp_app_lock);
+ spin_unlock(&ipvs->tcp_app_lock);
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
@@ -655,7 +654,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
goto out;
}
}
- spin_unlock(&tcp_app_lock);
+ spin_unlock(&ipvs->tcp_app_lock);
out:
return result;
@@ -665,24 +664,35 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
/*
* Set LISTEN timeout. (ip_vs_conn_put will setup timer)
*/
-void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
{
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
spin_lock(&cp->lock);
cp->state = IP_VS_TCP_S_LISTEN;
- cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN];
+ cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
+ : tcp_timeouts[IP_VS_TCP_S_LISTEN]);
spin_unlock(&cp->lock);
}
-
-static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ * timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
{
- IP_VS_INIT_HASH_TABLE(tcp_apps);
- pp->timeout_table = tcp_timeouts;
-}
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
+ spin_lock_init(&ipvs->tcp_app_lock);
+ pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
+ sizeof(tcp_timeouts));
+ pd->tcp_state_table = tcp_states;
+}
-static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
+ kfree(pd->timeout_table);
}
@@ -691,9 +701,10 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
.protocol = IPPROTO_TCP,
.num_states = IP_VS_TCP_S_LAST,
.dont_defrag = 0,
- .appcnt = ATOMIC_INIT(0),
- .init = ip_vs_tcp_init,
- .exit = ip_vs_tcp_exit,
+ .init = NULL,
+ .exit = NULL,
+ .init_netns = __ip_vs_tcp_init,
+ .exit_netns = __ip_vs_tcp_exit,
.register_app = tcp_register_app,
.unregister_app = tcp_unregister_app,
.conn_schedule = tcp_conn_schedule,
@@ -707,5 +718,4 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
.app_conn_bind = tcp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = tcp_timeout_change,
- .set_state_timeout = tcp_set_state_timeout,
};
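
__ip_vs_tcp_init() above, like its SCTP and UDP counterparts, duplicates the
const timeout template into a writable per-netns table through
ip_vs_create_timeout_table(), which is defined outside this diff. It is
assumed to reduce to a kmemdup(); a sketch:

	/* Writable per-netns copy of a const timeout template;
	 * freed again by the exit_netns hook (the kfree() above). */
	int *ip_vs_create_timeout_table(int *table, int size)
	{
		return kmemdup(table, size, GFP_ATOMIC);
	}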
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 9d106a0..f1282cb 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -9,7 +9,8 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * Changes:
+ * Changes: Hans Schillstrom <hans.schillstrom@ericsson.com>
+ * Network name space (netns) aware.
*
*/
@@ -28,9 +29,10 @@
#include <net/ip6_checksum.h>
static int
-udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
+ struct net *net;
struct ip_vs_service *svc;
struct udphdr _udph, *uh;
struct ip_vs_iphdr iph;
@@ -42,13 +44,13 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
*verdict = NF_DROP;
return 0;
}
-
- svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+ net = skb_net(skb);
+ svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
&iph.daddr, uh->dest);
if (svc) {
int ignored;
- if (ip_vs_todrop()) {
+ if (ip_vs_todrop(net_ipvs(net))) {
/*
* It seems that we are very loaded.
* We have to drop this packet :(
@@ -62,13 +64,19 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
* Let the virtual server select a real server for the
* incoming connection, and create a connection entry.
*/
- *cpp = ip_vs_schedule(svc, skb, pp, &ignored);
- if (!*cpp && !ignored) {
- *verdict = ip_vs_leave(svc, skb, pp);
+ *cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+ if (!*cpp && ignored <= 0) {
+ if (!ignored)
+ *verdict = ip_vs_leave(svc, skb, pd);
+ else {
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ }
return 0;
}
ip_vs_service_put(svc);
}
+ /* NF_ACCEPT */
return 1;
}
@@ -338,19 +346,6 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
return 1;
}
-
-/*
- * Note: the caller guarantees that only one of register_app,
- * unregister_app or app_conn_bind is called each time.
- */
-
-#define UDP_APP_TAB_BITS 4
-#define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
-#define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
-
-static struct list_head udp_apps[UDP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(udp_app_lock);
-
static inline __u16 udp_app_hashkey(__be16 port)
{
return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
@@ -358,44 +353,50 @@ static inline __u16 udp_app_hashkey(__be16 port)
}
-static int udp_register_app(struct ip_vs_app *inc)
+static int udp_register_app(struct net *net, struct ip_vs_app *inc)
{
struct ip_vs_app *i;
__u16 hash;
__be16 port = inc->port;
int ret = 0;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
hash = udp_app_hashkey(port);
- spin_lock_bh(&udp_app_lock);
- list_for_each_entry(i, &udp_apps[hash], p_list) {
+ spin_lock_bh(&ipvs->udp_app_lock);
+ list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
if (i->port == port) {
ret = -EEXIST;
goto out;
}
}
- list_add(&inc->p_list, &udp_apps[hash]);
- atomic_inc(&ip_vs_protocol_udp.appcnt);
+ list_add(&inc->p_list, &ipvs->udp_apps[hash]);
+ atomic_inc(&pd->appcnt);
out:
- spin_unlock_bh(&udp_app_lock);
+ spin_unlock_bh(&ipvs->udp_app_lock);
return ret;
}
static void
-udp_unregister_app(struct ip_vs_app *inc)
+udp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
- spin_lock_bh(&udp_app_lock);
- atomic_dec(&ip_vs_protocol_udp.appcnt);
+ struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ spin_lock_bh(&ipvs->udp_app_lock);
+ atomic_dec(&pd->appcnt);
list_del(&inc->p_list);
- spin_unlock_bh(&udp_app_lock);
+ spin_unlock_bh(&ipvs->udp_app_lock);
}
static int udp_app_conn_bind(struct ip_vs_conn *cp)
{
+ struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
int hash;
struct ip_vs_app *inc;
int result = 0;
@@ -407,12 +408,12 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
/* Lookup application incarnations and bind the right one */
hash = udp_app_hashkey(cp->vport);
- spin_lock(&udp_app_lock);
- list_for_each_entry(inc, &udp_apps[hash], p_list) {
+ spin_lock(&ipvs->udp_app_lock);
+ list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) {
if (inc->port == cp->vport) {
if (unlikely(!ip_vs_app_inc_get(inc)))
break;
- spin_unlock(&udp_app_lock);
+ spin_unlock(&ipvs->udp_app_lock);
IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
@@ -429,14 +430,14 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
goto out;
}
}
- spin_unlock(&udp_app_lock);
+ spin_unlock(&ipvs->udp_app_lock);
out:
return result;
}
-static int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
+static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_NORMAL] = 5*60*HZ,
[IP_VS_UDP_S_LAST] = 2*HZ,
};
@@ -446,14 +447,6 @@ static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_LAST] = "BUG!",
};
-
-static int
-udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
- return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST,
- udp_state_name_table, sname, to);
-}
-
static const char * udp_state_name(int state)
{
if (state >= IP_VS_UDP_S_LAST)
@@ -464,20 +457,30 @@ static const char * udp_state_name(int state)
static int
udp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
- struct ip_vs_protocol *pp)
+ struct ip_vs_proto_data *pd)
{
- cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL];
+ if (unlikely(!pd)) {
+ pr_err("UDP no ns data\n");
+ return 0;
+ }
+
+ cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
return 1;
}
-static void udp_init(struct ip_vs_protocol *pp)
+static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
{
- IP_VS_INIT_HASH_TABLE(udp_apps);
- pp->timeout_table = udp_timeouts;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
+ spin_lock_init(&ipvs->udp_app_lock);
+ pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
+ sizeof(udp_timeouts));
}
-static void udp_exit(struct ip_vs_protocol *pp)
+static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
+ kfree(pd->timeout_table);
}
@@ -486,8 +489,10 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
.protocol = IPPROTO_UDP,
.num_states = IP_VS_UDP_S_LAST,
.dont_defrag = 0,
- .init = udp_init,
- .exit = udp_exit,
+ .init = NULL,
+ .exit = NULL,
+ .init_netns = __udp_init,
+ .exit_netns = __udp_exit,
.conn_schedule = udp_conn_schedule,
.conn_in_get = ip_vs_conn_in_get_proto,
.conn_out_get = ip_vs_conn_out_get_proto,
@@ -501,5 +506,4 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
.app_conn_bind = udp_app_conn_bind,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL,
- .set_state_timeout = udp_set_state_timeout,
};
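
With .init/.exit now NULL in all three protocol structs, the per-netns hooks
must be driven from protocol registration instead. A hedged sketch of the
registration path this series is assumed to add in ip_vs_proto.c (names and
GFP flags are assumptions, not shown in this diff):

	static int
	register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
	{
		struct netns_ipvs *ipvs = net_ipvs(net);
		unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
		struct ip_vs_proto_data *pd =
			kzalloc(sizeof(struct ip_vs_proto_data), GFP_KERNEL);

		if (!pd)
			return -ENOMEM;

		pd->pp = pp;			/* For speed issues */
		pd->next = ipvs->proto_data_table[hash];
		ipvs->proto_data_table[hash] = pd;
		atomic_set(&pd->appcnt, 0);	/* Init app counter */

		/* Hand the per-netns data to the protocol's init hook */
		if (pp->init_netns != NULL)
			pp->init_netns(net, pd);

		return 0;
	}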
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index ab85aed..d1b7298 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -5,6 +5,18 @@
* high-performance and highly available server based on a
* cluster of servers.
*
 + * Version 1 is capable of handling both version 0 and 1 messages.
 + * Version 0 is the plain old format.
 + * Note: Version 0 receivers will just drop Ver. 1 messages.
 + * Version 1 is capable of handling IPv6, persistence data,
 + * time-outs, and firewall marks.
 + * In Ver. 1 "ip_vs_sync_conn_options" will be sent in network order.
 + * Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0
 + *
 + * Definitions: a Message is a complete datagram,
 + * a Sync_conn is a part of a Message,
 + * and Param Data is an option to a Sync_conn.
+ *
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
*
* ip_vs_sync: sync connection info from master load balancer to backups
@@ -15,6 +27,8 @@
* Alexandre Cassen : Added SyncID support for incoming sync
* messages filtering.
* Justin Ossevoort : Fix endian problem on sync message size.
+ * Hans Schillstrom : Added Version 1: i.e. IPv6,
+ * Persistence support, fwmark and time-out.
*/
#define KMSG_COMPONENT "IPVS"
@@ -35,6 +49,8 @@
#include <linux/wait.h>
#include <linux/kernel.h>
+#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
+
#include <net/ip.h>
#include <net/sock.h>
@@ -43,11 +59,13 @@
#define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */
#define IP_VS_SYNC_PORT 8848 /* multicast port */
+#define SYNC_PROTO_VER 1 /* Protocol version in header */
/*
* IPVS sync connection entry
+ * Version 0, i.e. original version.
*/
-struct ip_vs_sync_conn {
+struct ip_vs_sync_conn_v0 {
__u8 reserved;
/* Protocol, addresses and port numbers */
@@ -71,41 +89,159 @@ struct ip_vs_sync_conn_options {
struct ip_vs_seq out_seq; /* outgoing seq. struct */
};
+/*
+ Sync Connection format (sync_conn)
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Type | Protocol | Ver. | Size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Flags |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | State | cport |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | vport | dport |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | fwmark |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | timeout (in sec.) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | ... |
+ | IP-Addresses (v4 or v6) |
+ | ... |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ Optional Parameters.
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Param. Type | Param. Length | Param. data |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+ | ... |
+ | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | Param Type | Param. Length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Param data |
+ | Last Param data should be padded for 32 bit alignment |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+/*
+ * Type 0, IPv4 sync connection format
+ */
+struct ip_vs_sync_v4 {
+ __u8 type;
+ __u8 protocol; /* Which protocol (TCP/UDP) */
+ __be16 ver_size; /* Version msb 4 bits */
+ /* Flags and state transition */
+ __be32 flags; /* status flags */
+ __be16 state; /* state info */
+ /* Protocol, addresses and port numbers */
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __be32 fwmark; /* Firewall mark from skb */
+ __be32 timeout; /* cp timeout */
+ __be32 caddr; /* client address */
+ __be32 vaddr; /* virtual address */
+ __be32 daddr; /* destination address */
+ /* The sequence options start here */
+ /* PE data padded to 32bit alignment after seq. options */
+};
+/*
+ * Type 2 messages IPv6
+ */
+struct ip_vs_sync_v6 {
+ __u8 type;
+ __u8 protocol; /* Which protocol (TCP/UDP) */
+ __be16 ver_size; /* Version msb 4 bits */
+ /* Flags and state transition */
+ __be32 flags; /* status flags */
+ __be16 state; /* state info */
+ /* Protocol, addresses and port numbers */
+ __be16 cport;
+ __be16 vport;
+ __be16 dport;
+ __be32 fwmark; /* Firewall mark from skb */
+ __be32 timeout; /* cp timeout */
+ struct in6_addr caddr; /* client address */
+ struct in6_addr vaddr; /* virtual address */
+ struct in6_addr daddr; /* destination address */
+ /* The sequence options start here */
+ /* PE data padded to 32bit alignment after seq. options */
+};
+
+union ip_vs_sync_conn {
+ struct ip_vs_sync_v4 v4;
+ struct ip_vs_sync_v6 v6;
+};
+
+/* Bits in Type field in above */
+#define STYPE_INET6 0
+#define STYPE_F_INET6 (1 << STYPE_INET6)
+
+#define SVER_SHIFT 12 /* Shift to get version */
+#define SVER_MASK 0x0fff /* Mask to strip version */
+
+#define IPVS_OPT_SEQ_DATA 1
+#define IPVS_OPT_PE_DATA 2
+#define IPVS_OPT_PE_NAME 3
+#define IPVS_OPT_PARAM 7
+
+#define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1))
+#define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1))
+#define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1))
+#define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
+
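+
+/* The ver_size field above packs the 4-bit protocol version on top of the
+ * 12-bit sync_conn size. Hypothetical helpers, shown only to illustrate
+ * the encoding; they are not part of this patch:
+ *
+ *	static inline __be16 sync_pack_ver_size(unsigned int ver,
+ *						unsigned int size)
+ *	{
+ *		return htons((ver << SVER_SHIFT) | (size & SVER_MASK));
+ *	}
+ *
+ *	static inline unsigned int sync_unpack_size(__be16 ver_size)
+ *	{
+ *		return ntohs(ver_size) & SVER_MASK;
+ *	}
+ */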
struct ip_vs_sync_thread_data {
+ struct net *net;
struct socket *sock;
char *buf;
};
-#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn))
+/* Version 0 definition of packet sizes */
+#define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0))
#define FULL_CONN_SIZE \
-(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
+(sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options))
/*
- The master mulitcasts messages to the backup load balancers in the
- following format.
+ The master multicasts messages (datagrams) to the backup load balancers
+ in the following format.
+
+ Version 1:
+ Note: the first byte should be zero, so Ver. 0 receivers will drop the packet.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | Count Conns | SyncID | Size |
+ | 0 | SyncID | Size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Count Conns | Version | Reserved, set to Zero |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| IPVS Sync Connection (1) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| . |
- | . |
+ ~ . ~
| . |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
| IPVS Sync Connection (n) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Version 0 Header
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | Count Conns | SyncID | Size |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | IPVS Sync Connection (1) |
*/
#define SYNC_MESG_HEADER_LEN 4
#define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
-struct ip_vs_sync_mesg {
+/* Version 0 header */
+struct ip_vs_sync_mesg_v0 {
__u8 nr_conns;
__u8 syncid;
__u16 size;
@@ -113,9 +249,16 @@ struct ip_vs_sync_mesg {
/* ip_vs_sync_conn entries start here */
};
-/* the maximum length of sync (sending/receiving) message */
-static int sync_send_mesg_maxlen;
-static int sync_recv_mesg_maxlen;
+/* Version 1 header */
+struct ip_vs_sync_mesg {
+ __u8 reserved; /* must be zero */
+ __u8 syncid;
+ __u16 size;
+ __u8 nr_conns;
+ __s8 version; /* SYNC_PROTO_VER */
+ __u16 spare;
+ /* ip_vs_sync_conn entries start here */
+};
struct ip_vs_sync_buff {
struct list_head list;
@@ -127,28 +270,6 @@ struct ip_vs_sync_buff {
unsigned char *end;
};
-
-/* the sync_buff list head and the lock */
-static LIST_HEAD(ip_vs_sync_queue);
-static DEFINE_SPINLOCK(ip_vs_sync_lock);
-
-/* current sync_buff for accepting new conn entries */
-static struct ip_vs_sync_buff *curr_sb = NULL;
-static DEFINE_SPINLOCK(curr_sb_lock);
-
-/* ipvs sync daemon state */
-volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
-volatile int ip_vs_master_syncid = 0;
-volatile int ip_vs_backup_syncid = 0;
-
-/* multicast interface name */
-char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-
-/* sync daemon tasks */
-static struct task_struct *sync_master_thread;
-static struct task_struct *sync_backup_thread;
-
/* multicast addr */
static struct sockaddr_in mcast_addr = {
.sin_family = AF_INET,
@@ -156,41 +277,71 @@ static struct sockaddr_in mcast_addr = {
.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP),
};
+/*
+ * Copy of struct ip_vs_seq
+ * From unaligned network order to aligned host order
+ */
+static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
+{
+ ho->init_seq = get_unaligned_be32(&no->init_seq);
+ ho->delta = get_unaligned_be32(&no->delta);
+ ho->previous_delta = get_unaligned_be32(&no->previous_delta);
+}
+
+/*
+ * Copy of struct ip_vs_seq
+ * From Aligned host order to unaligned network order
+ */
+static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
+{
+ put_unaligned_be32(ho->init_seq, &no->init_seq);
+ put_unaligned_be32(ho->delta, &no->delta);
+ put_unaligned_be32(ho->previous_delta, &no->previous_delta);
+}
-static inline struct ip_vs_sync_buff *sb_dequeue(void)
+static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs)
{
struct ip_vs_sync_buff *sb;
- spin_lock_bh(&ip_vs_sync_lock);
- if (list_empty(&ip_vs_sync_queue)) {
+ spin_lock_bh(&ipvs->sync_lock);
+ if (list_empty(&ipvs->sync_queue)) {
sb = NULL;
} else {
- sb = list_entry(ip_vs_sync_queue.next,
+ sb = list_entry(ipvs->sync_queue.next,
struct ip_vs_sync_buff,
list);
list_del(&sb->list);
}
- spin_unlock_bh(&ip_vs_sync_lock);
+ spin_unlock_bh(&ipvs->sync_lock);
return sb;
}
-static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void)
+/*
+ * Create a new sync buffer for Version 1 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
{
struct ip_vs_sync_buff *sb;
if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
return NULL;
- if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
+ sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+ if (!sb->mesg) {
kfree(sb);
return NULL;
}
+	sb->mesg->reserved = 0; /* old nr_conns i.e. must be zero now */
+ sb->mesg->version = SYNC_PROTO_VER;
+ sb->mesg->syncid = ipvs->master_syncid;
+ sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
sb->mesg->nr_conns = 0;
- sb->mesg->syncid = ip_vs_master_syncid;
- sb->mesg->size = 4;
- sb->head = (unsigned char *)sb->mesg + 4;
- sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen;
+ sb->mesg->spare = 0;
+ sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
+ sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
+
sb->firstuse = jiffies;
return sb;
}
@@ -201,14 +352,16 @@ static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb)
kfree(sb);
}
-static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
+static inline void sb_queue_tail(struct netns_ipvs *ipvs)
{
- spin_lock(&ip_vs_sync_lock);
- if (ip_vs_sync_state & IP_VS_STATE_MASTER)
- list_add_tail(&sb->list, &ip_vs_sync_queue);
+ struct ip_vs_sync_buff *sb = ipvs->sync_buff;
+
+ spin_lock(&ipvs->sync_lock);
+ if (ipvs->sync_state & IP_VS_STATE_MASTER)
+ list_add_tail(&sb->list, &ipvs->sync_queue);
else
ip_vs_sync_buff_release(sb);
- spin_unlock(&ip_vs_sync_lock);
+ spin_unlock(&ipvs->sync_lock);
}
/*
@@ -216,36 +369,101 @@ static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
* than the specified time or the specified time is zero.
*/
static inline struct ip_vs_sync_buff *
-get_curr_sync_buff(unsigned long time)
+get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
{
struct ip_vs_sync_buff *sb;
- spin_lock_bh(&curr_sb_lock);
- if (curr_sb && (time == 0 ||
- time_before(jiffies - curr_sb->firstuse, time))) {
- sb = curr_sb;
- curr_sb = NULL;
+ spin_lock_bh(&ipvs->sync_buff_lock);
+ if (ipvs->sync_buff && (time == 0 ||
+ time_before(jiffies - ipvs->sync_buff->firstuse, time))) {
+ sb = ipvs->sync_buff;
+ ipvs->sync_buff = NULL;
} else
sb = NULL;
- spin_unlock_bh(&curr_sb_lock);
+ spin_unlock_bh(&ipvs->sync_buff_lock);
return sb;
}
+/*
+ * Switch mode from sending version 0 or 1
+ * - must handle sync_buf
+ */
+void ip_vs_sync_switch_mode(struct net *net, int mode)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
+ return;
+ if (mode == ipvs->sysctl_sync_ver || !ipvs->sync_buff)
+ return;
+
+ spin_lock_bh(&ipvs->sync_buff_lock);
+ /* Buffer empty ? then let buf_create do the job */
+ if (ipvs->sync_buff->mesg->size <= sizeof(struct ip_vs_sync_mesg)) {
+ kfree(ipvs->sync_buff);
+ ipvs->sync_buff = NULL;
+ } else {
+ spin_lock_bh(&ipvs->sync_lock);
+ if (ipvs->sync_state & IP_VS_STATE_MASTER)
+ list_add_tail(&ipvs->sync_buff->list,
+ &ipvs->sync_queue);
+ else
+ ip_vs_sync_buff_release(ipvs->sync_buff);
+ spin_unlock_bh(&ipvs->sync_lock);
+ }
+ spin_unlock_bh(&ipvs->sync_buff_lock);
+}
/*
+ * Create a new sync buffer for Version 0 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
+{
+ struct ip_vs_sync_buff *sb;
+ struct ip_vs_sync_mesg_v0 *mesg;
+
+ if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
+ return NULL;
+
+ sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+ if (!sb->mesg) {
+ kfree(sb);
+ return NULL;
+ }
+ mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
+ mesg->nr_conns = 0;
+ mesg->syncid = ipvs->master_syncid;
+ mesg->size = sizeof(struct ip_vs_sync_mesg_v0);
+ sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
+ sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
+ sb->firstuse = jiffies;
+ return sb;
+}
+
+/*
+ * Version 0, can be switched in by sysctl.
* Add an ip_vs_conn information into the current sync_buff.
- * Called by ip_vs_in.
*/
-void ip_vs_sync_conn(struct ip_vs_conn *cp)
+void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
{
- struct ip_vs_sync_mesg *m;
- struct ip_vs_sync_conn *s;
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_sync_mesg_v0 *m;
+ struct ip_vs_sync_conn_v0 *s;
int len;
- spin_lock(&curr_sb_lock);
- if (!curr_sb) {
- if (!(curr_sb=ip_vs_sync_buff_create())) {
- spin_unlock(&curr_sb_lock);
+ if (unlikely(cp->af != AF_INET))
+ return;
+ /* Do not sync ONE PACKET */
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ return;
+
+ spin_lock(&ipvs->sync_buff_lock);
+ if (!ipvs->sync_buff) {
+ ipvs->sync_buff =
+ ip_vs_sync_buff_create_v0(ipvs);
+ if (!ipvs->sync_buff) {
+ spin_unlock(&ipvs->sync_buff_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
return;
}
@@ -253,10 +471,11 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
SIMPLE_CONN_SIZE;
- m = curr_sb->mesg;
- s = (struct ip_vs_sync_conn *)curr_sb->head;
+ m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg;
+ s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head;
/* copy members */
+ s->reserved = 0;
s->protocol = cp->protocol;
s->cport = cp->cport;
s->vport = cp->vport;
@@ -274,83 +493,366 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
m->nr_conns++;
m->size += len;
- curr_sb->head += len;
+ ipvs->sync_buff->head += len;
/* check if there is a space for next one */
- if (curr_sb->head+FULL_CONN_SIZE > curr_sb->end) {
- sb_queue_tail(curr_sb);
- curr_sb = NULL;
+ if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) {
+ sb_queue_tail(ipvs);
+ ipvs->sync_buff = NULL;
}
- spin_unlock(&curr_sb_lock);
+ spin_unlock(&ipvs->sync_buff_lock);
/* synchronize its controller if it has */
if (cp->control)
- ip_vs_sync_conn(cp->control);
+ ip_vs_sync_conn(net, cp->control);
+}
+
+/*
+ * Add an ip_vs_conn information into the current sync_buff.
+ * Called by ip_vs_in.
+ * Sending Version 1 messages
+ */
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_sync_mesg *m;
+ union ip_vs_sync_conn *s;
+ __u8 *p;
+ unsigned int len, pe_name_len, pad;
+
+ /* Handle old version of the protocol */
+ if (ipvs->sysctl_sync_ver == 0) {
+ ip_vs_sync_conn_v0(net, cp);
+ return;
+ }
+ /* Do not sync ONE PACKET */
+ if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+ goto control;
+sloop:
+ /* Sanity checks */
+ pe_name_len = 0;
+ if (cp->pe_data_len) {
+ if (!cp->pe_data || !cp->dest) {
+ IP_VS_ERR_RL("SYNC, connection pe_data invalid\n");
+ return;
+ }
+ pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN);
+ }
+
+ spin_lock(&ipvs->sync_buff_lock);
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->af == AF_INET6)
+ len = sizeof(struct ip_vs_sync_v6);
+ else
+#endif
+ len = sizeof(struct ip_vs_sync_v4);
+
+ if (cp->flags & IP_VS_CONN_F_SEQ_MASK)
+ len += sizeof(struct ip_vs_sync_conn_options) + 2;
+
+ if (cp->pe_data_len)
+ len += cp->pe_data_len + 2; /* + Param hdr field */
+ if (pe_name_len)
+ len += pe_name_len + 2;
+
+ /* check if there is a space for this one */
+ pad = 0;
+ if (ipvs->sync_buff) {
+ pad = (4 - (size_t)ipvs->sync_buff->head) & 3;
+ if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) {
+ sb_queue_tail(ipvs);
+ ipvs->sync_buff = NULL;
+ pad = 0;
+ }
+ }
+
+ if (!ipvs->sync_buff) {
+ ipvs->sync_buff = ip_vs_sync_buff_create(ipvs);
+ if (!ipvs->sync_buff) {
+ spin_unlock(&ipvs->sync_buff_lock);
+ pr_err("ip_vs_sync_buff_create failed.\n");
+ return;
+ }
+ }
+
+ m = ipvs->sync_buff->mesg;
+ p = ipvs->sync_buff->head;
+ ipvs->sync_buff->head += pad + len;
+ m->size += pad + len;
+	/* Add any alignment padding left over from the previous sync_conn */
+ while (pad--)
+ *(p++) = 0;
+
+ s = (union ip_vs_sync_conn *)p;
+
+ /* Set message type & copy members */
+ s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0);
+ s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */
+ s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED);
+ s->v4.state = htons(cp->state);
+ s->v4.protocol = cp->protocol;
+ s->v4.cport = cp->cport;
+ s->v4.vport = cp->vport;
+ s->v4.dport = cp->dport;
+ s->v4.fwmark = htonl(cp->fwmark);
+ s->v4.timeout = htonl(cp->timeout / HZ);
+ m->nr_conns++;
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->af == AF_INET6) {
+ p += sizeof(struct ip_vs_sync_v6);
+ ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
+ ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
+ ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+ } else
+#endif
+ {
+ p += sizeof(struct ip_vs_sync_v4); /* options ptr */
+ s->v4.caddr = cp->caddr.ip;
+ s->v4.vaddr = cp->vaddr.ip;
+ s->v4.daddr = cp->daddr.ip;
+ }
+ if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
+ *(p++) = IPVS_OPT_SEQ_DATA;
+ *(p++) = sizeof(struct ip_vs_sync_conn_options);
+ hton_seq((struct ip_vs_seq *)p, &cp->in_seq);
+ p += sizeof(struct ip_vs_seq);
+ hton_seq((struct ip_vs_seq *)p, &cp->out_seq);
+ p += sizeof(struct ip_vs_seq);
+ }
+ /* Handle pe data */
+ if (cp->pe_data_len && cp->pe_data) {
+ *(p++) = IPVS_OPT_PE_DATA;
+ *(p++) = cp->pe_data_len;
+ memcpy(p, cp->pe_data, cp->pe_data_len);
+ p += cp->pe_data_len;
+ if (pe_name_len) {
+ /* Add PE_NAME */
+ *(p++) = IPVS_OPT_PE_NAME;
+ *(p++) = pe_name_len;
+ memcpy(p, cp->pe->name, pe_name_len);
+ p += pe_name_len;
+ }
+ }
+
+ spin_unlock(&ipvs->sync_buff_lock);
+
+control:
+ /* synchronize its controller if it has */
+ cp = cp->control;
+ if (!cp)
+ return;
+ /*
+ * Reduce sync rate for templates
+	 * i.e. only increment in_pkts for templates.
+ */
+ if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+ int pkts = atomic_add_return(1, &cp->in_pkts);
+
+ if (pkts % ipvs->sysctl_sync_threshold[1] != 1)
+ return;
+ }
+ goto sloop;
}
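
For concreteness: assuming the usual 36-byte sizeof(struct ip_vs_sync_v4) and
24-byte sizeof(struct ip_vs_sync_conn_options) (struct sizes assumed, not
taken from this diff), an IPv4 TCP connection carrying sequence data is
encoded as 36 bytes of fixed header plus a 2-byte option header plus 24 bytes
of options, 62 bytes in all; the next sync_conn in the buffer then starts on
the following 32-bit boundary, 64 bytes in.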
+/*
+ * fill_param used by version 1
+ */
static inline int
-ip_vs_conn_fill_param_sync(int af, int protocol,
- const union nf_inet_addr *caddr, __be16 cport,
- const union nf_inet_addr *vaddr, __be16 vport,
- struct ip_vs_conn_param *p)
+ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
+ struct ip_vs_conn_param *p,
+ __u8 *pe_data, unsigned int pe_data_len,
+ __u8 *pe_name, unsigned int pe_name_len)
{
- /* XXX: Need to take into account persistence engine */
- ip_vs_conn_fill_param(af, protocol, caddr, cport, vaddr, vport, p);
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6)
+ ip_vs_conn_fill_param(net, af, sc->v6.protocol,
+ (const union nf_inet_addr *)&sc->v6.caddr,
+ sc->v6.cport,
+ (const union nf_inet_addr *)&sc->v6.vaddr,
+ sc->v6.vport, p);
+ else
+#endif
+ ip_vs_conn_fill_param(net, af, sc->v4.protocol,
+ (const union nf_inet_addr *)&sc->v4.caddr,
+ sc->v4.cport,
+ (const union nf_inet_addr *)&sc->v4.vaddr,
+ sc->v4.vport, p);
+ /* Handle pe data */
+ if (pe_data_len) {
+ if (pe_name_len) {
+ char buff[IP_VS_PENAME_MAXLEN+1];
+
+ memcpy(buff, pe_name, pe_name_len);
+			buff[pe_name_len] = 0;
+ p->pe = __ip_vs_pe_getbyname(buff);
+ if (!p->pe) {
+ IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n",
+ buff);
+ return 1;
+ }
+ } else {
+ IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n");
+ return 1;
+ }
+
+ p->pe_data = kmalloc(pe_data_len, GFP_ATOMIC);
+ if (!p->pe_data) {
+ if (p->pe->module)
+ module_put(p->pe->module);
+ return -ENOMEM;
+ }
+ memcpy(p->pe_data, pe_data, pe_data_len);
+ p->pe_data_len = pe_data_len;
+ }
return 0;
}
/*
- * Process received multicast message and create the corresponding
- * ip_vs_conn entries.
+ * Connection Add / Update.
+ * Common for version 0 and 1 reception of backup sync_conns.
+ * Param: ...
+ * timeout is in sec.
*/
-static void ip_vs_process_message(const char *buffer, const size_t buflen)
+static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+ unsigned int flags, unsigned int state,
+ unsigned int protocol, unsigned int type,
+ const union nf_inet_addr *daddr, __be16 dport,
+ unsigned long timeout, __u32 fwmark,
+ struct ip_vs_sync_conn_options *opt)
{
- struct ip_vs_sync_mesg *m = (struct ip_vs_sync_mesg *)buffer;
- struct ip_vs_sync_conn *s;
- struct ip_vs_sync_conn_options *opt;
- struct ip_vs_conn *cp;
- struct ip_vs_protocol *pp;
struct ip_vs_dest *dest;
- struct ip_vs_conn_param param;
- char *p;
- int i;
+ struct ip_vs_conn *cp;
+ struct netns_ipvs *ipvs = net_ipvs(net);
- if (buflen < sizeof(struct ip_vs_sync_mesg)) {
- IP_VS_ERR_RL("sync message header too short\n");
- return;
- }
+ if (!(flags & IP_VS_CONN_F_TEMPLATE))
+ cp = ip_vs_conn_in_get(param);
+ else
+ cp = ip_vs_ct_in_get(param);
- /* Convert size back to host byte order */
- m->size = ntohs(m->size);
+ if (cp && param->pe_data) /* Free pe_data */
+ kfree(param->pe_data);
+ if (!cp) {
+ /*
+ * Find the appropriate destination for the connection.
+ * If it is not found the connection will remain unbound
+ * but still handled.
+ */
+ dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
+ param->vport, protocol, fwmark);
- if (buflen != m->size) {
- IP_VS_ERR_RL("bogus sync message size\n");
- return;
+		/* Set the appropriate activity flag */
+ if (protocol == IPPROTO_TCP) {
+ if (state != IP_VS_TCP_S_ESTABLISHED)
+ flags |= IP_VS_CONN_F_INACTIVE;
+ else
+ flags &= ~IP_VS_CONN_F_INACTIVE;
+ } else if (protocol == IPPROTO_SCTP) {
+ if (state != IP_VS_SCTP_S_ESTABLISHED)
+ flags |= IP_VS_CONN_F_INACTIVE;
+ else
+ flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
+ cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
+ if (dest)
+ atomic_dec(&dest->refcnt);
+ if (!cp) {
+ if (param->pe_data)
+ kfree(param->pe_data);
+ IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
+ return;
+ }
+ } else if (!cp->dest) {
+ dest = ip_vs_try_bind_dest(cp);
+ if (dest)
+ atomic_dec(&dest->refcnt);
+ } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
+ (cp->state != state)) {
+ /* update active/inactive flag for the connection */
+ dest = cp->dest;
+ if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state != IP_VS_TCP_S_ESTABLISHED)) {
+ atomic_dec(&dest->activeconns);
+ atomic_inc(&dest->inactconns);
+ cp->flags |= IP_VS_CONN_F_INACTIVE;
+ } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state == IP_VS_TCP_S_ESTABLISHED)) {
+ atomic_inc(&dest->activeconns);
+ atomic_dec(&dest->inactconns);
+ cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
+ } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
+ (cp->state != state)) {
+ dest = cp->dest;
+ if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state != IP_VS_SCTP_S_ESTABLISHED)) {
+ atomic_dec(&dest->activeconns);
+ atomic_inc(&dest->inactconns);
+ cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
}
- /* SyncID sanity check */
- if (ip_vs_backup_syncid != 0 && m->syncid != ip_vs_backup_syncid) {
- IP_VS_DBG(7, "Ignoring incoming msg with syncid = %d\n",
- m->syncid);
- return;
+ if (opt)
+ memcpy(&cp->in_seq, opt, sizeof(*opt));
+ atomic_set(&cp->in_pkts, ipvs->sysctl_sync_threshold[0]);
+ cp->state = state;
+ cp->old_state = cp->state;
+ /*
+ * For Ver 0 messages style
+ * - Not possible to recover the right timeout for templates
+ * - can not find the right fwmark
+ * virtual service. If needed, we can do it for
+ * non-fwmark persistent services.
+ * Ver 1 messages style.
+ * - No problem.
+ */
+ if (timeout) {
+ if (timeout > MAX_SCHEDULE_TIMEOUT / HZ)
+ timeout = MAX_SCHEDULE_TIMEOUT / HZ;
+ cp->timeout = timeout*HZ;
+ } else {
+ struct ip_vs_proto_data *pd;
+
+ pd = ip_vs_proto_data_get(net, protocol);
+ if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
+ cp->timeout = pd->timeout_table[state];
+ else
+ cp->timeout = (3*60*HZ);
}
+ ip_vs_conn_put(cp);
+}
- p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
+/*
+ * Process received multicast message for Version 0
+ */
+static void ip_vs_process_message_v0(struct net *net, const char *buffer,
+ const size_t buflen)
+{
+ struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
+ struct ip_vs_sync_conn_v0 *s;
+ struct ip_vs_sync_conn_options *opt;
+ struct ip_vs_protocol *pp;
+ struct ip_vs_conn_param param;
+ char *p;
+ int i;
+
+ p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
for (i=0; i<m->nr_conns; i++) {
unsigned flags, state;
if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
- IP_VS_ERR_RL("bogus conn in sync message\n");
+ IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
return;
}
- s = (struct ip_vs_sync_conn *) p;
+ s = (struct ip_vs_sync_conn_v0 *) p;
flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
flags &= ~IP_VS_CONN_F_HASHED;
if (flags & IP_VS_CONN_F_SEQ_MASK) {
opt = (struct ip_vs_sync_conn_options *)&s[1];
p += FULL_CONN_SIZE;
if (p > buffer+buflen) {
- IP_VS_ERR_RL("bogus conn options in sync message\n");
+ IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n");
return;
}
} else {
@@ -362,118 +864,286 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
pp = ip_vs_proto_get(s->protocol);
if (!pp) {
- IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n",
+ IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n",
s->protocol);
continue;
}
if (state >= pp->num_states) {
- IP_VS_DBG(2, "Invalid %s state %u in sync msg\n",
+ IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n",
pp->name, state);
continue;
}
} else {
/* protocol in templates is not used for state/timeout */
- pp = NULL;
if (state > 0) {
- IP_VS_DBG(2, "Invalid template state %u in sync msg\n",
+ IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n",
state);
state = 0;
}
}
- {
- if (ip_vs_conn_fill_param_sync(AF_INET, s->protocol,
- (union nf_inet_addr *)&s->caddr,
- s->cport,
- (union nf_inet_addr *)&s->vaddr,
- s->vport, &param)) {
- pr_err("ip_vs_conn_fill_param_sync failed");
- return;
+ ip_vs_conn_fill_param(net, AF_INET, s->protocol,
+ (const union nf_inet_addr *)&s->caddr,
+ s->cport,
+ (const union nf_inet_addr *)&s->vaddr,
+ s->vport, &param);
+
+ /* Send timeout as Zero */
+ ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET,
+ (union nf_inet_addr *)&s->daddr, s->dport,
+ 0, 0, opt);
+ }
+}
+
+/*
+ * Handle options
+ */
+static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
+ __u32 *opt_flags,
+ struct ip_vs_sync_conn_options *opt)
+{
+ struct ip_vs_sync_conn_options *topt;
+
+ topt = (struct ip_vs_sync_conn_options *)p;
+
+ if (plen != sizeof(struct ip_vs_sync_conn_options)) {
+ IP_VS_DBG(2, "BACKUP, bogus conn options length\n");
+ return -EINVAL;
+ }
+ if (*opt_flags & IPVS_OPT_F_SEQ_DATA) {
+ IP_VS_DBG(2, "BACKUP, conn options found twice\n");
+ return -EINVAL;
+ }
+ ntoh_seq(&topt->in_seq, &opt->in_seq);
+ ntoh_seq(&topt->out_seq, &opt->out_seq);
+ *opt_flags |= IPVS_OPT_F_SEQ_DATA;
+ return 0;
+}
+
+static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
+ __u8 **data, unsigned int maxlen,
+ __u32 *opt_flags, __u32 flag)
+{
+ if (plen > maxlen) {
+ IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen);
+ return -EINVAL;
+ }
+ if (*opt_flags & flag) {
+ IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag);
+ return -EINVAL;
+ }
+ *data_len = plen;
+ *data = p;
+ *opt_flags |= flag;
+ return 0;
+}
+/*
+ * Process a Version 1 sync. connection
+ */
+static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+{
+ struct ip_vs_sync_conn_options opt;
+ union ip_vs_sync_conn *s;
+ struct ip_vs_protocol *pp;
+ struct ip_vs_conn_param param;
+ __u32 flags;
+	unsigned int af, state, pe_data_len = 0, pe_name_len = 0;
+	__u8 *pe_data = NULL, *pe_name = NULL;
+	__u32 opt_flags = 0;
+	int retc = 0;
+
+ s = (union ip_vs_sync_conn *) p;
+
+ if (s->v6.type & STYPE_F_INET6) {
+#ifdef CONFIG_IP_VS_IPV6
+ af = AF_INET6;
+ p += sizeof(struct ip_vs_sync_v6);
+#else
+		IP_VS_DBG(3, "BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n");
+ retc = 10;
+ goto out;
+#endif
+ } else if (!s->v4.type) {
+ af = AF_INET;
+ p += sizeof(struct ip_vs_sync_v4);
+ } else {
+ return -10;
+ }
+ if (p > msg_end)
+ return -20;
+
+ /* Process optional params check Type & Len. */
+ while (p < msg_end) {
+ int ptype;
+ int plen;
+
+ if (p+2 > msg_end)
+ return -30;
+ ptype = *(p++);
+ plen = *(p++);
+
+ if (!plen || ((p + plen) > msg_end))
+ return -40;
+ /* Handle seq option p = param data */
+ switch (ptype & ~IPVS_OPT_F_PARAM) {
+ case IPVS_OPT_SEQ_DATA:
+ if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
+ return -50;
+ break;
+
+ case IPVS_OPT_PE_DATA:
+ if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
+ IP_VS_PEDATA_MAXLEN, &opt_flags,
+ IPVS_OPT_F_PE_DATA))
+ return -60;
+ break;
+
+ case IPVS_OPT_PE_NAME:
+			if (ip_vs_proc_str(p, plen, &pe_name_len, &pe_name,
+ IP_VS_PENAME_MAXLEN, &opt_flags,
+ IPVS_OPT_F_PE_NAME))
+ return -70;
+ break;
+
+ default:
+ /* Param data mandatory ? */
+ if (!(ptype & IPVS_OPT_F_PARAM)) {
+ IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n",
+ ptype & ~IPVS_OPT_F_PARAM);
+ retc = 20;
+ goto out;
}
- if (!(flags & IP_VS_CONN_F_TEMPLATE))
- cp = ip_vs_conn_in_get(&param);
- else
- cp = ip_vs_ct_in_get(&param);
}
- if (!cp) {
- /*
- * Find the appropriate destination for the connection.
- * If it is not found the connection will remain unbound
- * but still handled.
- */
- dest = ip_vs_find_dest(AF_INET,
- (union nf_inet_addr *)&s->daddr,
- s->dport,
- (union nf_inet_addr *)&s->vaddr,
- s->vport,
- s->protocol);
- /* Set the approprite ativity flag */
- if (s->protocol == IPPROTO_TCP) {
- if (state != IP_VS_TCP_S_ESTABLISHED)
- flags |= IP_VS_CONN_F_INACTIVE;
- else
- flags &= ~IP_VS_CONN_F_INACTIVE;
- } else if (s->protocol == IPPROTO_SCTP) {
- if (state != IP_VS_SCTP_S_ESTABLISHED)
- flags |= IP_VS_CONN_F_INACTIVE;
- else
- flags &= ~IP_VS_CONN_F_INACTIVE;
+ p += plen; /* Next option */
+ }
+
+ /* Get flags and Mask off unsupported */
+ flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK;
+ flags |= IP_VS_CONN_F_SYNC;
+ state = ntohs(s->v4.state);
+
+ if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+ pp = ip_vs_proto_get(s->v4.protocol);
+ if (!pp) {
+			IP_VS_DBG(3, "BACKUP, Unsupported protocol %u\n",
+ s->v4.protocol);
+ retc = 30;
+ goto out;
+ }
+ if (state >= pp->num_states) {
+ IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n",
+ pp->name, state);
+ retc = 40;
+ goto out;
+ }
+ } else {
+ /* protocol in templates is not used for state/timeout */
+ if (state > 0) {
+ IP_VS_DBG(3, "BACKUP, Invalid template state %u\n",
+ state);
+ state = 0;
+ }
+ }
+ if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data,
+ pe_data_len, pe_name, pe_name_len)) {
+ retc = 50;
+ goto out;
+ }
+	/* If compiled for IPv4 only, just silently skip IPv6 entries */
+ if (af == AF_INET)
+ ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af,
+ (union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
+ ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
+ (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+ );
+#ifdef CONFIG_IP_VS_IPV6
+ else
+ ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af,
+ (union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
+ ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
+ (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+ );
+#endif
+ return 0;
+ /* Error exit */
+out:
+ IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc);
+ return retc;
+
+}
+/*
+ * Process received multicast message and create the corresponding
+ * ip_vs_conn entries.
+ * Handles Version 0 & 1
+ */
+static void ip_vs_process_message(struct net *net, __u8 *buffer,
+ const size_t buflen)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+ struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
+ __u8 *p, *msg_end;
+ int i, nr_conns;
+
+ if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) {
+ IP_VS_DBG(2, "BACKUP, message header too short\n");
+ return;
+ }
+ /* Convert size back to host byte order */
+ m2->size = ntohs(m2->size);
+
+ if (buflen != m2->size) {
+ IP_VS_DBG(2, "BACKUP, bogus message size\n");
+ return;
+ }
+ /* SyncID sanity check */
+ if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
+ IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
+ return;
+ }
+ /* Handle version 1 message */
+ if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0)
+ && (m2->spare == 0)) {
+
+ msg_end = buffer + sizeof(struct ip_vs_sync_mesg);
+ nr_conns = m2->nr_conns;
+
+		for (i = 0; i < nr_conns; i++) {
+ union ip_vs_sync_conn *s;
+ unsigned size;
+ int retc;
+
+ p = msg_end;
+ if (p + sizeof(s->v4) > buffer+buflen) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n");
+ return;
}
- cp = ip_vs_conn_new(&param,
- (union nf_inet_addr *)&s->daddr,
- s->dport, flags, dest);
- if (dest)
- atomic_dec(&dest->refcnt);
- if (!cp) {
- pr_err("ip_vs_conn_new failed\n");
+ s = (union ip_vs_sync_conn *)p;
+ size = ntohs(s->v4.ver_size) & SVER_MASK;
+ msg_end = p + size;
+ /* Basic sanity checks */
+ if (msg_end > buffer+buflen) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n");
return;
}
- } else if (!cp->dest) {
- dest = ip_vs_try_bind_dest(cp);
- if (dest)
- atomic_dec(&dest->refcnt);
- } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
- (cp->state != state)) {
- /* update active/inactive flag for the connection */
- dest = cp->dest;
- if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state != IP_VS_TCP_S_ESTABLISHED)) {
- atomic_dec(&dest->activeconns);
- atomic_inc(&dest->inactconns);
- cp->flags |= IP_VS_CONN_F_INACTIVE;
- } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state == IP_VS_TCP_S_ESTABLISHED)) {
- atomic_inc(&dest->activeconns);
- atomic_dec(&dest->inactconns);
- cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ if (ntohs(s->v4.ver_size) >> SVER_SHIFT) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n",
+ ntohs(s->v4.ver_size) >> SVER_SHIFT);
+ return;
}
- } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
- (cp->state != state)) {
- dest = cp->dest;
- if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
- (state != IP_VS_SCTP_S_ESTABLISHED)) {
- atomic_dec(&dest->activeconns);
- atomic_inc(&dest->inactconns);
- cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ /* Process a single sync_conn */
+ retc = ip_vs_proc_sync_conn(net, p, msg_end);
+ if (retc < 0) {
+ IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
+ retc);
+ return;
}
+ /* Make sure we have 32 bit alignment */
+ msg_end = p + ((size + 3) & ~3);
}
-
- if (opt)
- memcpy(&cp->in_seq, opt, sizeof(*opt));
- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
- cp->state = state;
- cp->old_state = cp->state;
- /*
- * We can not recover the right timeout for templates
- * in all cases, we can not find the right fwmark
- * virtual service. If needed, we can do it for
- * non-fwmark persistent services.
- */
- if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
- cp->timeout = pp->timeout_table[state];
- else
- cp->timeout = (3*60*HZ);
- ip_vs_conn_put(cp);
+ } else {
+ /* Old type of message */
+ ip_vs_process_message_v0(net, buffer, buflen);
+ return;
}
}
@@ -511,8 +1181,10 @@ static int set_mcast_if(struct sock *sk, char *ifname)
{
struct net_device *dev;
struct inet_sock *inet = inet_sk(sk);
+ struct net *net = sock_net(sk);
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
@@ -531,30 +1203,33 @@ static int set_mcast_if(struct sock *sk, char *ifname)
* Set the maximum length of sync message according to the
* specified interface's MTU.
*/
-static int set_sync_mesg_maxlen(int sync_state)
+static int set_sync_mesg_maxlen(struct net *net, int sync_state)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct net_device *dev;
int num;
if (sync_state == IP_VS_STATE_MASTER) {
- if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL)
+ dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
+ if (!dev)
return -ENODEV;
num = (dev->mtu - sizeof(struct iphdr) -
sizeof(struct udphdr) -
SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
- sync_send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
+ ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
IP_VS_DBG(7, "setting the maximum length of sync sending "
- "message %d.\n", sync_send_mesg_maxlen);
+ "message %d.\n", ipvs->send_mesg_maxlen);
} else if (sync_state == IP_VS_STATE_BACKUP) {
- if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL)
+ dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
+ if (!dev)
return -ENODEV;
- sync_recv_mesg_maxlen = dev->mtu -
+ ipvs->recv_mesg_maxlen = dev->mtu -
sizeof(struct iphdr) - sizeof(struct udphdr);
IP_VS_DBG(7, "setting the maximum length of sync receiving "
- "message %d.\n", sync_recv_mesg_maxlen);
+ "message %d.\n", ipvs->recv_mesg_maxlen);
}
return 0;
@@ -569,6 +1244,7 @@ static int set_sync_mesg_maxlen(int sync_state)
static int
join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
{
+ struct net *net = sock_net(sk);
struct ip_mreqn mreq;
struct net_device *dev;
int ret;
@@ -576,7 +1252,8 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
memset(&mreq, 0, sizeof(mreq));
memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -593,11 +1270,13 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
static int bind_mcastif_addr(struct socket *sock, char *ifname)
{
+ struct net *net = sock_net(sock->sk);
struct net_device *dev;
__be32 addr;
struct sockaddr_in sin;
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(net, ifname);
+ if (!dev)
return -ENODEV;
addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
@@ -619,19 +1298,20 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
/*
* Set up sending multicast socket over UDP
*/
-static struct socket * make_send_sock(void)
+static struct socket *make_send_sock(struct net *net)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct socket *sock;
int result;
/* First create a socket */
- result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}
- result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
+ result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
goto error;
@@ -640,7 +1320,7 @@ static struct socket * make_send_sock(void)
set_mcast_loop(sock->sk, 0);
set_mcast_ttl(sock->sk, 1);
- result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
+ result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
if (result < 0) {
pr_err("Error binding address of the mcast interface\n");
goto error;
@@ -664,13 +1344,14 @@ static struct socket * make_send_sock(void)
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket * make_receive_sock(void)
+static struct socket *make_receive_sock(struct net *net)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct socket *sock;
int result;
/* First create a socket */
- result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
@@ -689,7 +1370,7 @@ static struct socket * make_receive_sock(void)
/* join the multicast group */
result = join_mcast_group(sock->sk,
(struct in_addr *) &mcast_addr.sin_addr,
- ip_vs_backup_mcast_ifn);
+ ipvs->backup_mcast_ifn);
if (result < 0) {
pr_err("Error joining to the multicast group\n");
goto error;
@@ -760,20 +1441,21 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
static int sync_thread_master(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
+ struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
struct ip_vs_sync_buff *sb;
pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
"syncid = %d\n",
- ip_vs_master_mcast_ifn, ip_vs_master_syncid);
+ ipvs->master_mcast_ifn, ipvs->master_syncid);
while (!kthread_should_stop()) {
- while ((sb = sb_dequeue())) {
+ while ((sb = sb_dequeue(ipvs))) {
ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
ip_vs_sync_buff_release(sb);
}
- /* check if entries stay in curr_sb for 2 seconds */
- sb = get_curr_sync_buff(2 * HZ);
+ /* check if entries stay in ipvs->sync_buff for 2 seconds */
+ sb = get_curr_sync_buff(ipvs, 2 * HZ);
if (sb) {
ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
ip_vs_sync_buff_release(sb);
@@ -783,14 +1465,13 @@ static int sync_thread_master(void *data)
}
/* clean up the sync_buff queue */
- while ((sb=sb_dequeue())) {
+ while ((sb = sb_dequeue(ipvs)))
ip_vs_sync_buff_release(sb);
- }
/* clean up the current sync_buff */
- if ((sb = get_curr_sync_buff(0))) {
+ sb = get_curr_sync_buff(ipvs, 0);
+ if (sb)
ip_vs_sync_buff_release(sb);
- }
/* release the sending multicast socket */
sock_release(tinfo->sock);
@@ -803,11 +1484,12 @@ static int sync_thread_master(void *data)
static int sync_thread_backup(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
+ struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
int len;
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
"syncid = %d\n",
- ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
+ ipvs->backup_mcast_ifn, ipvs->backup_syncid);
while (!kthread_should_stop()) {
wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -817,7 +1499,7 @@ static int sync_thread_backup(void *data)
/* do we have data now? */
while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
len = ip_vs_receive(tinfo->sock, tinfo->buf,
- sync_recv_mesg_maxlen);
+ ipvs->recv_mesg_maxlen);
if (len <= 0) {
pr_err("receiving message error\n");
break;
@@ -826,7 +1508,7 @@ static int sync_thread_backup(void *data)
/* disable bottom half, because it accesses the data
shared by softirq while getting/creating conns */
local_bh_disable();
- ip_vs_process_message(tinfo->buf, len);
+ ip_vs_process_message(tinfo->net, tinfo->buf, len);
local_bh_enable();
}
}
@@ -840,41 +1522,42 @@ static int sync_thread_backup(void *data)
}
-int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
{
struct ip_vs_sync_thread_data *tinfo;
struct task_struct **realtask, *task;
struct socket *sock;
+ struct netns_ipvs *ipvs = net_ipvs(net);
char *name, *buf = NULL;
int (*threadfn)(void *data);
int result = -ENOMEM;
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
- sizeof(struct ip_vs_sync_conn));
+ sizeof(struct ip_vs_sync_conn_v0));
if (state == IP_VS_STATE_MASTER) {
- if (sync_master_thread)
+ if (ipvs->master_thread)
return -EEXIST;
- strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
- sizeof(ip_vs_master_mcast_ifn));
- ip_vs_master_syncid = syncid;
- realtask = &sync_master_thread;
- name = "ipvs_syncmaster";
+ strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
+ sizeof(ipvs->master_mcast_ifn));
+ ipvs->master_syncid = syncid;
+ realtask = &ipvs->master_thread;
+ name = "ipvs_master:%d";
threadfn = sync_thread_master;
- sock = make_send_sock();
+ sock = make_send_sock(net);
} else if (state == IP_VS_STATE_BACKUP) {
- if (sync_backup_thread)
+ if (ipvs->backup_thread)
return -EEXIST;
- strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
- sizeof(ip_vs_backup_mcast_ifn));
- ip_vs_backup_syncid = syncid;
- realtask = &sync_backup_thread;
- name = "ipvs_syncbackup";
+ strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
+ sizeof(ipvs->backup_mcast_ifn));
+ ipvs->backup_syncid = syncid;
+ realtask = &ipvs->backup_thread;
+ name = "ipvs_backup:%d";
threadfn = sync_thread_backup;
- sock = make_receive_sock();
+ sock = make_receive_sock(net);
} else {
return -EINVAL;
}
@@ -884,9 +1567,9 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
goto out;
}
- set_sync_mesg_maxlen(state);
+ set_sync_mesg_maxlen(net, state);
if (state == IP_VS_STATE_BACKUP) {
- buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL);
+ buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL);
if (!buf)
goto outsocket;
}
@@ -895,10 +1578,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
if (!tinfo)
goto outbuf;
+ tinfo->net = net;
tinfo->sock = sock;
tinfo->buf = buf;
- task = kthread_run(threadfn, tinfo, name);
+ task = kthread_run(threadfn, tinfo, name, ipvs->gen);
if (IS_ERR(task)) {
result = PTR_ERR(task);
goto outtinfo;
@@ -906,7 +1590,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
/* mark as active */
*realtask = task;
- ip_vs_sync_state |= state;
+ ipvs->sync_state |= state;
/* increase the module use count */
ip_vs_use_count_inc();
@@ -924,16 +1608,18 @@ out:
}
-int stop_sync_thread(int state)
+int stop_sync_thread(struct net *net, int state)
{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
if (state == IP_VS_STATE_MASTER) {
- if (!sync_master_thread)
+ if (!ipvs->master_thread)
return -ESRCH;
pr_info("stopping master sync thread %d ...\n",
- task_pid_nr(sync_master_thread));
+ task_pid_nr(ipvs->master_thread));
/*
* The lock synchronizes with sb_queue_tail(), so that we don't
@@ -941,21 +1627,21 @@ int stop_sync_thread(int state)
* progress of stopping the master sync daemon.
*/
- spin_lock_bh(&ip_vs_sync_lock);
- ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
- spin_unlock_bh(&ip_vs_sync_lock);
- kthread_stop(sync_master_thread);
- sync_master_thread = NULL;
+ spin_lock_bh(&ipvs->sync_lock);
+ ipvs->sync_state &= ~IP_VS_STATE_MASTER;
+ spin_unlock_bh(&ipvs->sync_lock);
+ kthread_stop(ipvs->master_thread);
+ ipvs->master_thread = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
- if (!sync_backup_thread)
+ if (!ipvs->backup_thread)
return -ESRCH;
pr_info("stopping backup sync thread %d ...\n",
- task_pid_nr(sync_backup_thread));
+ task_pid_nr(ipvs->backup_thread));
- ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
- kthread_stop(sync_backup_thread);
- sync_backup_thread = NULL;
+ ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
+ kthread_stop(ipvs->backup_thread);
+ ipvs->backup_thread = NULL;
} else {
return -EINVAL;
}
@@ -965,3 +1651,42 @@ int stop_sync_thread(int state)
return 0;
}
+
+/*
+ * Initialize data struct for each netns
+ */
+static int __net_init __ip_vs_sync_init(struct net *net)
+{
+ struct netns_ipvs *ipvs = net_ipvs(net);
+
+ INIT_LIST_HEAD(&ipvs->sync_queue);
+ spin_lock_init(&ipvs->sync_lock);
+ spin_lock_init(&ipvs->sync_buff_lock);
+
+ ipvs->sync_mcast_addr.sin_family = AF_INET;
+ ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
+ ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
+ return 0;
+}
+
+static void __ip_vs_sync_cleanup(struct net *net)
+{
+ stop_sync_thread(net, IP_VS_STATE_MASTER);
+ stop_sync_thread(net, IP_VS_STATE_BACKUP);
+}
+
+static struct pernet_operations ipvs_sync_ops = {
+ .init = __ip_vs_sync_init,
+ .exit = __ip_vs_sync_cleanup,
+};
+
+
+int __init ip_vs_sync_init(void)
+{
+ return register_pernet_subsys(&ipvs_sync_ops);
+}
+
+void ip_vs_sync_cleanup(void)
+{
+ unregister_pernet_subsys(&ipvs_sync_ops);
+}
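
Annotation: the sync code above trades its file-scope globals (sync_master_thread, ip_vs_master_mcast_ifn, sync_send_mesg_maxlen, ...) for fields of struct netns_ipvs, and registers pernet_operations so every network namespace gets its own initialized sync state and has its sync threads stopped at namespace teardown. A minimal sketch of the same pattern — using the generic net_generic() storage and hypothetical names rather than IPVS's own net_ipvs() accessor:

    #include <linux/module.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    struct my_pernet_state {
            spinlock_t lock;
            struct list_head queue;
    };

    static int my_net_id __read_mostly;

    static int __net_init my_net_init(struct net *net)
    {
            struct my_pernet_state *st = net_generic(net, my_net_id);

            spin_lock_init(&st->lock);
            INIT_LIST_HEAD(&st->queue);
            return 0;
    }

    static void __net_exit my_net_exit(struct net *net)
    {
            /* stop per-namespace threads, drain queued buffers, ... */
    }

    static struct pernet_operations my_net_ops = {
            .init = my_net_init,
            .exit = my_net_exit,
            .id   = &my_net_id,
            .size = sizeof(struct my_pernet_state),
    };

    static int __init my_init(void)
    {
            return register_pernet_subsys(&my_net_ops);
    }

    static void __exit my_exit(void)
    {
            unregister_pernet_subsys(&my_net_ops);
    }
    module_init(my_init);
    module_exit(my_exit);
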
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 5325a3fb..1f2a4e3 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -175,7 +175,6 @@ __ip_vs_reroute_locally(struct sk_buff *skb)
.fl4_tos = RT_TOS(iph->tos),
.mark = skb->mark,
};
- struct rtable *rt;
if (ip_route_output_key(net, &rt, &fl))
return 0;
@@ -390,7 +389,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+ if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+ !skb_is_gso(skb)) {
ip_rt_put(rt);
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -443,7 +443,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -543,7 +543,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+ if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+ !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
"ip_vs_nat_xmit(): frag needed for");
@@ -658,7 +659,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -773,8 +774,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
df |= (old_iph->frag_off & htons(IP_DF));
- if ((old_iph->frag_off & htons(IP_DF))
- && mtu < ntohs(old_iph->tot_len)) {
+ if ((old_iph->frag_off & htons(IP_DF)) &&
+     mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error_put;
@@ -886,7 +887,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
- if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
+ if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
+ !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -991,7 +993,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
+ if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
+ !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -1158,7 +1161,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
+ if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
+ !skb_is_gso(skb)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error_put;
@@ -1272,7 +1276,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->dst);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
if (!skb->dev) {
struct net *net = dev_net(skb_dst(skb)->dev);
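
Annotation: every MTU check touched above gains a !skb_is_gso(skb) clause for the same reason — a GSO skb may legitimately be longer than the route MTU, since the stack segments it into MTU-sized frames before transmission, so answering it with ICMP_FRAG_NEEDED (or ICMPV6_PKT_TOOBIG) would wrongly reject valid traffic. The recurring IPv4 test reduces to roughly this shape (a sketch; the actual call sites differ in how they fetch the header and clean up):

    #include <linux/skbuff.h>
    #include <linux/ip.h>

    static bool ipv4_frag_needed(const struct sk_buff *skb,
                                 const struct iphdr *iph, unsigned int mtu)
    {
            /* only non-GSO DF packets above the MTU warrant an ICMP error;
             * GSO skbs are cut down to the MTU by segmentation later */
            return skb->len > mtu &&
                   (iph->frag_off & htons(IP_DF)) &&
                   !skb_is_gso(skb);
    }
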
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
new file mode 100644
index 0000000..4e99cca
--- /dev/null
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -0,0 +1,82 @@
+/*
+ * broadcast connection tracking helper
+ *
+ * (c) 2005 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <net/route.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+int nf_conntrack_broadcast_help(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int timeout)
+{
+ struct nf_conntrack_expect *exp;
+ struct iphdr *iph = ip_hdr(skb);
+ struct rtable *rt = skb_rtable(skb);
+ struct in_device *in_dev;
+ struct nf_conn_help *help = nfct_help(ct);
+ __be32 mask = 0;
+
+ /* we're only interested in locally generated packets */
+ if (skb->sk == NULL)
+ goto out;
+ if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
+ goto out;
+ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ goto out;
+
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(rt->dst.dev);
+ if (in_dev != NULL) {
+ for_primary_ifa(in_dev) {
+ if (ifa->ifa_broadcast == iph->daddr) {
+ mask = ifa->ifa_mask;
+ break;
+ }
+ } endfor_ifa(in_dev);
+ }
+ rcu_read_unlock();
+
+ if (mask == 0)
+ goto out;
+
+ exp = nf_ct_expect_alloc(ct);
+ if (exp == NULL)
+ goto out;
+
+ exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+ exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port;
+
+ exp->mask.src.u3.ip = mask;
+ exp->mask.src.u.udp.port = htons(0xFFFF);
+
+ exp->expectfn = NULL;
+ exp->flags = NF_CT_EXPECT_PERMANENT;
+ exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
+ exp->helper = NULL;
+
+ nf_ct_expect_related(exp);
+ nf_ct_expect_put(exp);
+
+ nf_ct_refresh(ct, skb, timeout * HZ);
+out:
+ return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help);
+
+MODULE_LICENSE("GPL");
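
Annotation: this helper is the generalized form of the netbios-ns logic removed later in the patch. It expects the service's reply from any host on the broadcast subnet by copying the reply-direction tuple and widening the source match with exp->mask.src.u3.ip set to the interface netmask; the expectation then matches when (reply_saddr & mask) == (expected_saddr & mask). A small userspace illustration of that masking, with made-up addresses:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool masked_match(uint32_t saddr, uint32_t expected, uint32_t mask)
    {
            return (saddr & mask) == (expected & mask);
    }

    int main(void)
    {
            uint32_t mask = inet_addr("255.255.255.0");
            uint32_t expected = inet_addr("192.168.1.10");

            /* any 192.168.1.x source matches; other subnets do not */
            printf("%d\n", masked_match(inet_addr("192.168.1.77"), expected, mask));
            printf("%d\n", masked_match(inet_addr("192.168.2.77"), expected, mask));
            return 0;
    }
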
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 84f4fcc..2f454ef 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -43,6 +43,7 @@
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
@@ -282,6 +283,11 @@ EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
static void death_by_timeout(unsigned long ul_conntrack)
{
struct nf_conn *ct = (void *)ul_conntrack;
+ struct nf_conn_tstamp *tstamp;
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp && tstamp->stop == 0)
+ tstamp->stop = ktime_to_ns(ktime_get_real());
if (!test_bit(IPS_DYING_BIT, &ct->status) &&
unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
@@ -419,6 +425,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conn_help *help;
+ struct nf_conn_tstamp *tstamp;
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
@@ -486,8 +493,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
ct->timeout.expires += jiffies;
add_timer(&ct->timeout);
atomic_inc(&ct->ct_general.use);
- set_bit(IPS_CONFIRMED_BIT, &ct->status);
+ ct->status |= IPS_CONFIRMED;
+
+ /* set conntrack timestamp, if enabled. */
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp) {
+ if (skb->tstamp.tv64 == 0)
+ __net_timestamp((struct sk_buff *)skb);
+ tstamp->start = ktime_to_ns(skb->tstamp);
+ }
/* Since the lookup is lockless, hash insertion must be done after
* starting the timer and setting the CONFIRMED bit. The RCU barriers
* guarantee that no other CPU can find the conntrack before the above
@@ -655,7 +670,8 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
* and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
*/
memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
- sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
+ offsetof(struct nf_conn, proto) -
+ offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
spin_lock_init(&ct->lock);
ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -745,6 +761,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
}
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
@@ -1192,6 +1209,11 @@ struct __nf_ct_flush_report {
static int kill_report(struct nf_conn *i, void *data)
{
struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
+ struct nf_conn_tstamp *tstamp;
+
+ tstamp = nf_conn_tstamp_find(i);
+ if (tstamp && tstamp->stop == 0)
+ tstamp->stop = ktime_to_ns(ktime_get_real());
/* If we fail to deliver the event, death_by_timeout() will retry */
if (nf_conntrack_event_report(IPCT_DESTROY, i,
@@ -1208,9 +1230,9 @@ static int kill_all(struct nf_conn *i, void *data)
return 1;
}
-void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, unsigned int size)
{
- if (vmalloced)
+ if (is_vmalloc_addr(hash))
vfree(hash);
else
free_pages((unsigned long)hash,
@@ -1277,8 +1299,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
goto i_see_dead_people;
}
- nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- net->ct.htable_size);
+ nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
nf_conntrack_ecache_fini(net);
nf_conntrack_acct_fini(net);
nf_conntrack_expect_fini(net);
@@ -1307,21 +1328,18 @@ void nf_conntrack_cleanup(struct net *net)
}
}
-void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{
struct hlist_nulls_head *hash;
unsigned int nr_slots, i;
size_t sz;
- *vmalloced = 0;
-
BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
sz = nr_slots * sizeof(struct hlist_nulls_head);
hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
get_order(sz));
if (!hash) {
- *vmalloced = 1;
printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
PAGE_KERNEL);
@@ -1337,7 +1355,7 @@ EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
- int i, bucket, vmalloced, old_vmalloced;
+ int i, bucket;
unsigned int hashsize, old_size;
struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
@@ -1354,7 +1372,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
if (!hashsize)
return -EINVAL;
- hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
+ hash = nf_ct_alloc_hashtable(&hashsize, 1);
if (!hash)
return -ENOMEM;
@@ -1376,15 +1394,13 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
}
}
old_size = init_net.ct.htable_size;
- old_vmalloced = init_net.ct.hash_vmalloc;
old_hash = init_net.ct.hash;
init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
- init_net.ct.hash_vmalloc = vmalloced;
init_net.ct.hash = hash;
spin_unlock_bh(&nf_conntrack_lock);
- nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
+ nf_ct_free_hashtable(old_hash, old_size);
return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
@@ -1497,8 +1513,7 @@ static int nf_conntrack_init_net(struct net *net)
}
net->ct.htable_size = nf_conntrack_htable_size;
- net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
- &net->ct.hash_vmalloc, 1);
+ net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
if (!net->ct.hash) {
ret = -ENOMEM;
printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
@@ -1510,6 +1525,9 @@ static int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_acct_init(net);
if (ret < 0)
goto err_acct;
+ ret = nf_conntrack_tstamp_init(net);
+ if (ret < 0)
+ goto err_tstamp;
ret = nf_conntrack_ecache_init(net);
if (ret < 0)
goto err_ecache;
@@ -1517,12 +1535,13 @@ static int nf_conntrack_init_net(struct net *net)
return 0;
err_ecache:
+ nf_conntrack_tstamp_fini(net);
+err_tstamp:
nf_conntrack_acct_fini(net);
err_acct:
nf_conntrack_expect_fini(net);
err_expect:
- nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- net->ct.htable_size);
+ nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
err_hash:
kmem_cache_destroy(net->ct.nf_conntrack_cachep);
err_cache:
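
Annotation: a side theme of the core.c changes is that nf_ct_free_hashtable() drops its vmalloced parameter, because is_vmalloc_addr() can classify the pointer itself; that is what lets the hash_vmalloc/expect_vmalloc bookkeeping disappear here and in the files below. The alloc/free pairing, condensed to a sketch (the real allocator also rounds the bucket count to whole pages):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *table_alloc(size_t sz)
    {
            void *p;

            p = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                         get_order(sz));
            if (!p)         /* high-order allocation failed, fall back */
                    p = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                                  PAGE_KERNEL);
            return p;
    }

    static void table_free(void *p, size_t sz)
    {
            if (is_vmalloc_addr(p)) /* no flag to carry around any more */
                    vfree(p);
            else
                    free_pages((unsigned long)p, get_order(sz));
    }
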
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index a20fb0b..cd1e8e0 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -319,7 +319,8 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
const struct nf_conntrack_expect_policy *p;
unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
- atomic_inc(&exp->use);
+ /* two references: one for hash insert, one for the timer */
+ atomic_add(2, &exp->use);
if (master_help) {
hlist_add_head(&exp->lnode, &master_help->expectations);
@@ -333,12 +334,14 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
(unsigned long)exp);
if (master_help) {
- p = &master_help->helper->expect_policy[exp->class];
+ p = &rcu_dereference_protected(
+ master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ )->expect_policy[exp->class];
exp->timeout.expires = jiffies + p->timeout * HZ;
}
add_timer(&exp->timeout);
- atomic_inc(&exp->use);
NF_CT_STAT_INC(net, expect_create);
}
@@ -369,7 +372,10 @@ static inline int refresh_timer(struct nf_conntrack_expect *i)
if (!del_timer(&i->timeout))
return 0;
- p = &master_help->helper->expect_policy[i->class];
+ p = &rcu_dereference_protected(
+ master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ )->expect_policy[i->class];
i->timeout.expires = jiffies + p->timeout * HZ;
add_timer(&i->timeout);
return 1;
@@ -407,7 +413,10 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
}
/* Will be over limit? */
if (master_help) {
- p = &master_help->helper->expect_policy[expect->class];
+ p = &rcu_dereference_protected(
+ master_help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ )->expect_policy[expect->class];
if (p->max_expected &&
master_help->expecting[expect->class] >= p->max_expected) {
evict_oldest_expect(master, expect);
@@ -478,7 +487,7 @@ static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
struct hlist_node *n;
for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
- n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
if (n)
return n;
}
@@ -491,11 +500,11 @@ static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_expect_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_next_rcu(head));
while (head == NULL) {
if (++st->bucket >= nf_ct_expect_hsize)
return NULL;
- head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+ head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
}
return head;
}
@@ -630,8 +639,7 @@ int nf_conntrack_expect_init(struct net *net)
}
net->ct.expect_count = 0;
- net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
- &net->ct.expect_vmalloc, 0);
+ net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
if (net->ct.expect_hash == NULL)
goto err1;
@@ -653,8 +661,7 @@ err3:
if (net_eq(net, &init_net))
kmem_cache_destroy(nf_ct_expect_cachep);
err2:
- nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
- nf_ct_expect_hsize);
+ nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
return err;
}
@@ -666,6 +673,5 @@ void nf_conntrack_expect_fini(struct net *net)
rcu_barrier(); /* Wait for call_rcu() before destroy */
kmem_cache_destroy(nf_ct_expect_cachep);
}
- nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
- nf_ct_expect_hsize);
+ nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}
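
Annotation: the rcu_dereference_protected(..., lockdep_is_held(&nf_conntrack_lock)) conversions here — and in the helper, proto, log and queue files below — are all the same fix. These are update-side reads of __rcu-annotated pointers, so a plain read trips sparse, while a reader-side rcu_dereference() would imply protection that is not there; rcu_dereference_protected() documents (and, with lockdep, verifies) which lock makes the access safe, at zero runtime cost. The idiom in isolation, with hypothetical types:

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct cfg { int val; };

    static DEFINE_MUTEX(cfg_mutex);
    static struct cfg __rcu *active_cfg;

    static void cfg_replace(struct cfg *newc)
    {
            struct cfg *old;

            mutex_lock(&cfg_mutex);
            /* update side: safe because cfg_mutex serializes writers */
            old = rcu_dereference_protected(active_cfg,
                                            lockdep_is_held(&cfg_mutex));
            rcu_assign_pointer(active_cfg, newc);
            mutex_unlock(&cfg_mutex);

            if (old) {
                    synchronize_rcu();      /* wait out readers of 'old' */
                    kfree(old);
            }
    }
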
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index bd82450..80a23ed 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -140,15 +140,16 @@ static void update_alloc_size(struct nf_ct_ext_type *type)
/* This assumes that extended areas in conntrack for the types
whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
for (i = min; i <= max; i++) {
- t1 = nf_ct_ext_types[i];
+ t1 = rcu_dereference_protected(nf_ct_ext_types[i],
+ lockdep_is_held(&nf_ct_ext_type_mutex));
if (!t1)
continue;
- t1->alloc_size = sizeof(struct nf_ct_ext)
- + ALIGN(sizeof(struct nf_ct_ext), t1->align)
- + t1->len;
+ t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
+ t1->len;
for (j = 0; j < NF_CT_EXT_NUM; j++) {
- t2 = nf_ct_ext_types[j];
+ t2 = rcu_dereference_protected(nf_ct_ext_types[j],
+ lockdep_is_held(&nf_ct_ext_type_mutex));
if (t2 == NULL || t2 == t1 ||
(t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
continue;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 59e1a4c..1bdfea3 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -33,7 +33,6 @@ static DEFINE_MUTEX(nf_ct_helper_mutex);
static struct hlist_head *nf_ct_helper_hash __read_mostly;
static unsigned int nf_ct_helper_hsize __read_mostly;
static unsigned int nf_ct_helper_count __read_mostly;
-static int nf_ct_helper_vmalloc;
/* Stupid hash, but collision free for the default registrations of the
@@ -158,7 +157,10 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
struct nf_conn_help *help = nfct_help(ct);
- if (help && help->helper == me) {
+ if (help && rcu_dereference_protected(
+ help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ ) == me) {
nf_conntrack_event(IPCT_HELPER, ct);
rcu_assign_pointer(help->helper, NULL);
}
@@ -210,7 +212,10 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
hlist_for_each_entry_safe(exp, n, next,
&net->ct.expect_hash[i], hnode) {
struct nf_conn_help *help = nfct_help(exp->master);
- if ((help->helper == me || exp->helper == me) &&
+ if ((rcu_dereference_protected(
+ help->helper,
+ lockdep_is_held(&nf_conntrack_lock)
+ ) == me || exp->helper == me) &&
del_timer(&exp->timeout)) {
nf_ct_unlink_expect(exp);
nf_ct_expect_put(exp);
@@ -261,8 +266,7 @@ int nf_conntrack_helper_init(void)
int err;
nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
- nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
- &nf_ct_helper_vmalloc, 0);
+ nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
if (!nf_ct_helper_hash)
return -ENOMEM;
@@ -273,14 +277,12 @@ int nf_conntrack_helper_init(void)
return 0;
err1:
- nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
- nf_ct_helper_hsize);
+ nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
return err;
}
void nf_conntrack_helper_fini(void)
{
nf_ct_extend_unregister(&helper_extend);
- nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
- nf_ct_helper_hsize);
+ nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
}
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index aadde01..4c8f30a 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -18,14 +18,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/if_addr.h>
#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <net/route.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
@@ -40,75 +33,26 @@ MODULE_ALIAS("ip_conntrack_netbios_ns");
MODULE_ALIAS_NFCT_HELPER("netbios_ns");
static unsigned int timeout __read_mostly = 3;
-module_param(timeout, uint, 0400);
+module_param(timeout, uint, S_IRUSR);
MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
-static int help(struct sk_buff *skb, unsigned int protoff,
- struct nf_conn *ct, enum ip_conntrack_info ctinfo)
-{
- struct nf_conntrack_expect *exp;
- struct iphdr *iph = ip_hdr(skb);
- struct rtable *rt = skb_rtable(skb);
- struct in_device *in_dev;
- __be32 mask = 0;
-
- /* we're only interested in locally generated packets */
- if (skb->sk == NULL)
- goto out;
- if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
- goto out;
- if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
- goto out;
-
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(rt->dst.dev);
- if (in_dev != NULL) {
- for_primary_ifa(in_dev) {
- if (ifa->ifa_broadcast == iph->daddr) {
- mask = ifa->ifa_mask;
- break;
- }
- } endfor_ifa(in_dev);
- }
- rcu_read_unlock();
-
- if (mask == 0)
- goto out;
-
- exp = nf_ct_expect_alloc(ct);
- if (exp == NULL)
- goto out;
-
- exp->tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- exp->tuple.src.u.udp.port = htons(NMBD_PORT);
-
- exp->mask.src.u3.ip = mask;
- exp->mask.src.u.udp.port = htons(0xFFFF);
-
- exp->expectfn = NULL;
- exp->flags = NF_CT_EXPECT_PERMANENT;
- exp->class = NF_CT_EXPECT_CLASS_DEFAULT;
- exp->helper = NULL;
-
- nf_ct_expect_related(exp);
- nf_ct_expect_put(exp);
-
- nf_ct_refresh(ct, skb, timeout * HZ);
-out:
- return NF_ACCEPT;
-}
-
static struct nf_conntrack_expect_policy exp_policy = {
.max_expected = 1,
};
+static int netbios_ns_help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+}
+
static struct nf_conntrack_helper helper __read_mostly = {
.name = "netbios-ns",
- .tuple.src.l3num = AF_INET,
+ .tuple.src.l3num = NFPROTO_IPV4,
.tuple.src.u.udp.port = cpu_to_be16(NMBD_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
.me = THIS_MODULE,
- .help = help,
+ .help = netbios_ns_help,
.expect_policy = &exp_policy,
};
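
Annotation: with the duplicated expectation logic moved into nf_conntrack_broadcast_help(), a broadcast-service helper shrinks to a one-line ->help callback plus a tuple, exactly as netbios-ns above and the new snmp helper below. A further helper for some other UDP broadcast service would presumably look like this — name and port are hypothetical, and the same module scaffolding (timeout parameter, exp_policy, includes) as in the snmp file is assumed:

    static int wins_help(struct sk_buff *skb, unsigned int protoff,
                         struct nf_conn *ct, enum ip_conntrack_info ctinfo)
    {
            /* delegate to the shared broadcast expectation code */
            return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
    }

    static struct nf_conntrack_helper wins_helper __read_mostly = {
            .name                   = "wins",               /* hypothetical */
            .tuple.src.l3num        = NFPROTO_IPV4,
            .tuple.src.u.udp.port   = cpu_to_be16(1512),    /* hypothetical */
            .tuple.dst.protonum     = IPPROTO_UDP,
            .me                     = THIS_MODULE,
            .help                   = wins_help,
            .expect_policy          = &exp_policy,
    };
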
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index eead9db..30bf8a1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -42,6 +42,7 @@
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_protocol.h>
@@ -230,6 +231,33 @@ nla_put_failure:
return -1;
}
+static int
+ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
+{
+ struct nlattr *nest_count;
+ const struct nf_conn_tstamp *tstamp;
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (!tstamp)
+ return 0;
+
+ nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
+ if (!nest_count)
+ goto nla_put_failure;
+
+ NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
+ if (tstamp->stop != 0) {
+ NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
+ cpu_to_be64(tstamp->stop));
+ }
+ nla_nest_end(skb, nest_count);
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
#ifdef CONFIG_NF_CONNTRACK_MARK
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
@@ -404,6 +432,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
ctnetlink_dump_timeout(skb, ct) < 0 ||
ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+ ctnetlink_dump_timestamp(skb, ct) < 0 ||
ctnetlink_dump_protoinfo(skb, ct) < 0 ||
ctnetlink_dump_helpinfo(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, ct) < 0 ||
@@ -471,6 +500,18 @@ ctnetlink_secctx_size(const struct nf_conn *ct)
}
static inline size_t
+ctnetlink_timestamp_size(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
+ return 0;
+ return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
+#else
+ return 0;
+#endif
+}
+
+static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
return NLMSG_ALIGN(sizeof(struct nfgenmsg))
@@ -481,6 +522,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+ nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
+ ctnetlink_counters_size(ct)
+ + ctnetlink_timestamp_size(ct)
+ nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+ nla_total_size(0) /* CTA_PROTOINFO */
+ nla_total_size(0) /* CTA_HELP */
@@ -571,7 +613,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
if (events & (1 << IPCT_DESTROY)) {
if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
- ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+ ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+ ctnetlink_dump_timestamp(skb, ct) < 0)
goto nla_put_failure;
} else {
if (ctnetlink_dump_timeout(skb, ct) < 0)
@@ -761,7 +804,7 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple,
- enum ctattr_tuple type, u_int8_t l3num)
+ enum ctattr_type type, u_int8_t l3num)
{
struct nlattr *tb[CTA_TUPLE_MAX+1];
int err;
@@ -1358,6 +1401,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
}
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
/* we must add conntrack extensions before confirmation. */
ct->status |= IPS_CONFIRMED;
@@ -1376,6 +1420,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
}
#endif
+ memset(&ct->proto, 0, sizeof(ct->proto));
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
if (err < 0)
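
Annotation: ctnetlink_dump_timestamp() above follows the standard nesting recipe — nla_nest_start() opens the CTA_TIMESTAMP container (NLA_F_NESTED flags it as such), the fixed-size payloads are emitted big-endian inside it, and nla_nest_end() goes back and fills in the container's length. The companion ctnetlink_timestamp_size() must then budget nla_total_size(0) for the container plus one nla_total_size() per payload attribute. A condensed sketch of the pair, with nla_nest_cancel() added for a tidy unwind:

    static int dump_window(struct sk_buff *skb, u64 start, u64 stop)
    {
            struct nlattr *nest;

            nest = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
            if (!nest)
                    return -EMSGSIZE;

            NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(start));
            if (stop != 0)
                    NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP, cpu_to_be64(stop));

            nla_nest_end(skb, nest);        /* patches the container length */
            return 0;

    nla_put_failure:                        /* NLA_PUT_BE64 jumps here on overflow */
            nla_nest_cancel(skb, nest);
            return -EMSGSIZE;
    }

    static size_t dump_window_size(void)
    {
            return nla_total_size(0)                  /* the nested container */
                   + 2 * nla_total_size(sizeof(u64)); /* start + stop */
    }
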
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index dc7bb74..5701c8d 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -166,6 +166,7 @@ static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto
int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
{
int ret = 0;
+ struct nf_conntrack_l3proto *old;
if (proto->l3proto >= AF_MAX)
return -EBUSY;
@@ -174,7 +175,9 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
return -EINVAL;
mutex_lock(&nf_ct_proto_mutex);
- if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
+ old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+ lockdep_is_held(&nf_ct_proto_mutex));
+ if (old != &nf_conntrack_l3proto_generic) {
ret = -EBUSY;
goto out_unlock;
}
@@ -201,7 +204,9 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
BUG_ON(proto->l3proto >= AF_MAX);
mutex_lock(&nf_ct_proto_mutex);
- BUG_ON(nf_ct_l3protos[proto->l3proto] != proto);
+ BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+ lockdep_is_held(&nf_ct_proto_mutex)
+ ) != proto);
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
&nf_conntrack_l3proto_generic);
nf_ct_l3proto_unregister_sysctl(proto);
@@ -279,7 +284,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
mutex_lock(&nf_ct_proto_mutex);
if (!nf_ct_protos[l4proto->l3proto]) {
/* l3proto may be loaded later. */
- struct nf_conntrack_l4proto **proto_array;
+ struct nf_conntrack_l4proto __rcu **proto_array;
int i;
proto_array = kmalloc(MAX_NF_CT_PROTO *
@@ -291,7 +296,7 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
}
for (i = 0; i < MAX_NF_CT_PROTO; i++)
- proto_array[i] = &nf_conntrack_l4proto_generic;
+ RCU_INIT_POINTER(proto_array[i], &nf_conntrack_l4proto_generic);
/* Before making proto_array visible to lockless readers,
* we must make sure its content is committed to memory.
@@ -299,8 +304,10 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
smp_wmb();
nf_ct_protos[l4proto->l3proto] = proto_array;
- } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
- &nf_conntrack_l4proto_generic) {
+ } else if (rcu_dereference_protected(
+ nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+ lockdep_is_held(&nf_ct_proto_mutex)
+ ) != &nf_conntrack_l4proto_generic) {
ret = -EBUSY;
goto out_unlock;
}
@@ -331,7 +338,10 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
BUG_ON(l4proto->l3proto >= PF_MAX);
mutex_lock(&nf_ct_proto_mutex);
- BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto);
+ BUG_ON(rcu_dereference_protected(
+ nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+ lockdep_is_held(&nf_ct_proto_mutex)
+ ) != l4proto);
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
&nf_conntrack_l4proto_generic);
nf_ct_l4proto_unregister_sysctl(l4proto);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 5292560..9ae57c5 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -452,6 +452,9 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
ct->proto.dccp.state = CT_DCCP_NONE;
+ ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
+ ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
+ ct->proto.dccp.handshake_seq = 0;
return true;
out_invalid:
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index c6049c2..6f4ee70 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -413,6 +413,7 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
test_bit(SCTP_CID_COOKIE_ACK, map))
return false;
+ memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
new_state = SCTP_CONNTRACK_MAX;
for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
/* Don't need lock here: this conntrack not in circulation yet */
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 3fb2b73..6f38d0e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1066,9 +1066,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
BUG_ON(th == NULL);
/* Don't need lock here: this conntrack not in circulation yet */
- new_state
- = tcp_conntracks[0][get_conntrack_index(th)]
- [TCP_CONNTRACK_NONE];
+ new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
/* Invalid: delete conntrack */
if (new_state >= TCP_CONNTRACK_MAX) {
@@ -1077,6 +1075,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
}
if (new_state == TCP_CONNTRACK_SYN_SENT) {
+ memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/* SYN packet */
ct->proto.tcp.seen[0].td_end =
segment_seq_plus_len(ntohl(th->seq), skb->len,
@@ -1088,11 +1087,11 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.tcp.seen[0].td_end;
tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
- ct->proto.tcp.seen[1].flags = 0;
} else if (nf_ct_tcp_loose == 0) {
/* Don't try to pick up connections. */
return false;
} else {
+ memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
/*
* We are in the middle of a connection,
* its history is lost for us.
@@ -1107,7 +1106,6 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.tcp.seen[0].td_maxend =
ct->proto.tcp.seen[0].td_end +
ct->proto.tcp.seen[0].td_maxwin;
- ct->proto.tcp.seen[0].td_scale = 0;
/* We assume SACK and liberal window checking to handle
* window scaling */
@@ -1116,13 +1114,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
IP_CT_TCP_FLAG_BE_LIBERAL;
}
- ct->proto.tcp.seen[1].td_end = 0;
- ct->proto.tcp.seen[1].td_maxend = 0;
- ct->proto.tcp.seen[1].td_maxwin = 0;
- ct->proto.tcp.seen[1].td_scale = 0;
-
/* tcp_packet will set them */
- ct->proto.tcp.state = TCP_CONNTRACK_NONE;
ct->proto.tcp.last_index = TCP_NONE_SET;
pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
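
Annotation: the dccp, sctp and tcp hunks all compensate for the core.c change earlier in this patch. __nf_conntrack_alloc() now clears a fresh conntrack only up to offsetof(struct nf_conn, proto), so the per-protocol union — the bulk of the object — is no longer zeroed twice; in exchange, each L4 tracker's ->new() callback (and ctnetlink_create_conntrack()) must clear its own slice before writing to it. The resulting pattern, sketched:

    static bool example_new(struct nf_conn *ct, const struct sk_buff *skb,
                            unsigned int dataoff)
    {
            /* ct->proto is not pre-zeroed by the allocator any more */
            memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));

            /* then set only the fields that need a nonzero start value */
            ct->proto.tcp.last_index = TCP_NONE_SET;
            return true;
    }
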
diff --git a/net/netfilter/nf_conntrack_snmp.c b/net/netfilter/nf_conntrack_snmp.c
new file mode 100644
index 0000000..6e545e2
--- /dev/null
+++ b/net/netfilter/nf_conntrack_snmp.c
@@ -0,0 +1,77 @@
+/*
+ * SNMP service broadcast connection tracking helper
+ *
+ * (c) 2011 Jiri Olsa <jolsa@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/in.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#define SNMP_PORT 161
+
+MODULE_AUTHOR("Jiri Olsa <jolsa@redhat.com>");
+MODULE_DESCRIPTION("SNMP service broadcast connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFCT_HELPER("snmp");
+
+static unsigned int timeout __read_mostly = 30;
+module_param(timeout, uint, S_IRUSR);
+MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
+
+int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+EXPORT_SYMBOL_GPL(nf_nat_snmp_hook);
+
+static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ typeof(nf_nat_snmp_hook) nf_nat_snmp;
+
+ nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+
+ nf_nat_snmp = rcu_dereference(nf_nat_snmp_hook);
+ if (nf_nat_snmp && ct->status & IPS_NAT_MASK)
+ return nf_nat_snmp(skb, protoff, ct, ctinfo);
+
+ return NF_ACCEPT;
+}
+
+static struct nf_conntrack_expect_policy exp_policy = {
+ .max_expected = 1,
+};
+
+static struct nf_conntrack_helper helper __read_mostly = {
+ .name = "snmp",
+ .tuple.src.l3num = NFPROTO_IPV4,
+ .tuple.src.u.udp.port = cpu_to_be16(SNMP_PORT),
+ .tuple.dst.protonum = IPPROTO_UDP,
+ .me = THIS_MODULE,
+ .help = snmp_conntrack_help,
+ .expect_policy = &exp_policy,
+};
+
+static int __init nf_conntrack_snmp_init(void)
+{
+ exp_policy.timeout = timeout;
+ return nf_conntrack_helper_register(&helper);
+}
+
+static void __exit nf_conntrack_snmp_fini(void)
+{
+ nf_conntrack_helper_unregister(&helper);
+}
+
+module_init(nf_conntrack_snmp_init);
+module_exit(nf_conntrack_snmp_fini);
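
Annotation: nf_nat_snmp_hook is the conventional bridge to an optional NAT companion module — the helper exports a NULL function pointer, callers sample it once under RCU (as snmp_conntrack_help() does above), and the NAT module installs and removes its implementation at load/unload time. The provider side would look roughly like this (the snmp_nat body is hypothetical):

    static int snmp_nat(struct sk_buff *skb, unsigned int protoff,
                        struct nf_conn *ct, enum ip_conntrack_info ctinfo)
    {
            /* rewrite addresses embedded in the SNMP payload here */
            return NF_ACCEPT;
    }

    static int __init nf_nat_snmp_init(void)
    {
            rcu_assign_pointer(nf_nat_snmp_hook, snmp_nat);
            return 0;
    }

    static void __exit nf_nat_snmp_fini(void)
    {
            rcu_assign_pointer(nf_nat_snmp_hook, NULL);
            synchronize_rcu();      /* in-flight callers may still run */
    }
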
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index b4d7f0f..0ae1428 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -29,6 +29,8 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <linux/rculist_nulls.h>
MODULE_LICENSE("GPL");
@@ -45,6 +47,7 @@ EXPORT_SYMBOL_GPL(print_tuple);
struct ct_iter_state {
struct seq_net_private p;
unsigned int bucket;
+ u_int64_t time_now;
};
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
@@ -56,7 +59,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
for (st->bucket = 0;
st->bucket < net->ct.htable_size;
st->bucket++) {
- n = rcu_dereference(net->ct.hash[st->bucket].first);
+ n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
if (!is_a_nulls(n))
return n;
}
@@ -69,13 +72,15 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
- head = rcu_dereference(head->next);
+ head = rcu_dereference(hlist_nulls_next_rcu(head));
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
if (++st->bucket >= net->ct.htable_size)
return NULL;
}
- head = rcu_dereference(net->ct.hash[st->bucket].first);
+ head = rcu_dereference(
+ hlist_nulls_first_rcu(
+ &net->ct.hash[st->bucket]));
}
return head;
}
@@ -93,6 +98,9 @@ static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
+ struct ct_iter_state *st = seq->private;
+
+ st->time_now = ktime_to_ns(ktime_get_real());
rcu_read_lock();
return ct_get_idx(seq, *pos);
}
@@ -132,6 +140,34 @@ static inline int ct_show_secctx(struct seq_file *s, const struct nf_conn *ct)
}
#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+ struct ct_iter_state *st = s->private;
+ struct nf_conn_tstamp *tstamp;
+ s64 delta_time;
+
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp) {
+ delta_time = st->time_now - tstamp->start;
+ if (delta_time > 0)
+ delta_time = div_s64(delta_time, NSEC_PER_SEC);
+ else
+ delta_time = 0;
+
+ return seq_printf(s, "delta-time=%llu ",
+ (unsigned long long)delta_time);
+ }
+ return 0;
+}
+#else
+static inline int
+ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+ return 0;
+}
+#endif
+
/* return 0 on success, 1 in case of error */
static int ct_seq_show(struct seq_file *s, void *v)
{
@@ -200,6 +236,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
goto release;
#endif
+ if (ct_show_delta_time(s, ct))
+ goto release;
+
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
new file mode 100644
index 0000000..af7dd31
--- /dev/null
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -0,0 +1,120 @@
+/*
+ * (C) 2010 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+
+#include <linux/netfilter.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+
+static int nf_ct_tstamp __read_mostly;
+
+module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
+MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table tstamp_sysctl_table[] = {
+ {
+ .procname = "nf_conntrack_timestamp",
+ .data = &init_net.ct.sysctl_tstamp,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+#endif /* CONFIG_SYSCTL */
+
+static struct nf_ct_ext_type tstamp_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_tstamp),
+ .align = __alignof__(struct nf_conn_tstamp),
+ .id = NF_CT_EXT_TSTAMP,
+};
+
+#ifdef CONFIG_SYSCTL
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
+ GFP_KERNEL);
+ if (!table)
+ goto out;
+
+ table[0].data = &net->ct.sysctl_tstamp;
+
+ net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
+ nf_net_netfilter_sysctl_path, table);
+ if (!net->ct.tstamp_sysctl_header) {
+ printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
+ goto out_register;
+ }
+ return 0;
+
+out_register:
+ kfree(table);
+out:
+ return -ENOMEM;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = net->ct.tstamp_sysctl_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
+ kfree(table);
+}
+#else
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+ return 0;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+}
+#endif
+
+int nf_conntrack_tstamp_init(struct net *net)
+{
+ int ret;
+
+ net->ct.sysctl_tstamp = nf_ct_tstamp;
+
+ if (net_eq(net, &init_net)) {
+ ret = nf_ct_extend_register(&tstamp_extend);
+ if (ret < 0) {
+ printk(KERN_ERR "nf_ct_tstamp: Unable to register "
+ "extension\n");
+ goto out_extend_register;
+ }
+ }
+
+ ret = nf_conntrack_tstamp_init_sysctl(net);
+ if (ret < 0)
+ goto out_sysctl;
+
+ return 0;
+
+out_sysctl:
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&tstamp_extend);
+out_extend_register:
+ return ret;
+}
+
+void nf_conntrack_tstamp_fini(struct net *net)
+{
+ nf_conntrack_tstamp_fini_sysctl(net);
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&tstamp_extend);
+}
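
Annotation: the sysctl half of this file is the stock per-namespace recipe. The static template table cannot be registered as-is because its .data points into init_net, so each namespace gets a kmemdup()'ed copy with .data redirected to its own field, and cleanup recovers that copy through the header's ctl_table_arg. Condensed, with hypothetical function names:

    static int my_sysctl_init(struct net *net)
    {
            struct ctl_table *table;

            table = kmemdup(template_table, sizeof(template_table), GFP_KERNEL);
            if (!table)
                    return -ENOMEM;

            table[0].data = &net->ct.sysctl_tstamp; /* per-netns storage */

            net->ct.tstamp_sysctl_header =
                    register_net_sysctl_table(net, nf_net_netfilter_sysctl_path,
                                              table);
            if (!net->ct.tstamp_sysctl_header) {
                    kfree(table);
                    return -ENOMEM;
            }
            return 0;
    }

    static void my_sysctl_fini(struct net *net)
    {
            /* the header remembers the kmemdup()'ed copy for us */
            struct ctl_table *table = net->ct.tstamp_sysctl_header->ctl_table_arg;

            unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
            kfree(table);
    }
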
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393e..20c775c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -161,7 +161,8 @@ static int seq_show(struct seq_file *s, void *v)
struct nf_logger *t;
int ret;
- logger = nf_loggers[*pos];
+ logger = rcu_dereference_protected(nf_loggers[*pos],
+ lockdep_is_held(&nf_log_mutex));
if (!logger)
ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -249,7 +250,8 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
mutex_unlock(&nf_log_mutex);
} else {
mutex_lock(&nf_log_mutex);
- logger = nf_loggers[tindex];
+ logger = rcu_dereference_protected(nf_loggers[tindex],
+ lockdep_is_held(&nf_log_mutex));
if (!logger)
table->data = "NONE";
else
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 74aebed5..5ab22e2 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -27,14 +27,17 @@ static DEFINE_MUTEX(queue_handler_mutex);
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
int ret;
+ const struct nf_queue_handler *old;
if (pf >= ARRAY_SIZE(queue_handler))
return -EINVAL;
mutex_lock(&queue_handler_mutex);
- if (queue_handler[pf] == qh)
+ old = rcu_dereference_protected(queue_handler[pf],
+ lockdep_is_held(&queue_handler_mutex));
+ if (old == qh)
ret = -EEXIST;
- else if (queue_handler[pf])
+ else if (old)
ret = -EBUSY;
else {
rcu_assign_pointer(queue_handler[pf], qh);
@@ -49,11 +52,15 @@ EXPORT_SYMBOL(nf_register_queue_handler);
/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
+ const struct nf_queue_handler *old;
+
if (pf >= ARRAY_SIZE(queue_handler))
return -EINVAL;
mutex_lock(&queue_handler_mutex);
- if (queue_handler[pf] && queue_handler[pf] != qh) {
+ old = rcu_dereference_protected(queue_handler[pf],
+ lockdep_is_held(&queue_handler_mutex));
+ if (old && old != qh) {
mutex_unlock(&queue_handler_mutex);
return -EINVAL;
}
@@ -73,7 +80,10 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
mutex_lock(&queue_handler_mutex);
for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
- if (queue_handler[pf] == qh)
+ if (rcu_dereference_protected(
+ queue_handler[pf],
+ lockdep_is_held(&queue_handler_mutex)
+ ) == qh)
rcu_assign_pointer(queue_handler[pf], NULL);
}
mutex_unlock(&queue_handler_mutex);
@@ -115,7 +125,7 @@ static int __nf_queue(struct sk_buff *skb,
int (*okfn)(struct sk_buff *),
unsigned int queuenum)
{
- int status;
+ int status = -ENOENT;
struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
struct net_device *physindev;
@@ -128,16 +138,20 @@ static int __nf_queue(struct sk_buff *skb,
rcu_read_lock();
qh = rcu_dereference(queue_handler[pf]);
- if (!qh)
+ if (!qh) {
+ status = -ESRCH;
goto err_unlock;
+ }
afinfo = nf_get_afinfo(pf);
if (!afinfo)
goto err_unlock;
entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
- if (!entry)
+ if (!entry) {
+ status = -ENOMEM;
goto err_unlock;
+ }
*entry = (struct nf_queue_entry) {
.skb = skb,
@@ -151,11 +165,9 @@ static int __nf_queue(struct sk_buff *skb,
/* If it's going away, ignore hook. */
if (!try_module_get(entry->elem->owner)) {
- rcu_read_unlock();
- kfree(entry);
- return 0;
+ status = -ECANCELED;
+ goto err_unlock;
}
-
/* Bump dev refs so they don't vanish while packet is out */
if (indev)
dev_hold(indev);
@@ -182,14 +194,13 @@ static int __nf_queue(struct sk_buff *skb,
goto err;
}
- return 1;
+ return 0;
err_unlock:
rcu_read_unlock();
err:
- kfree_skb(skb);
kfree(entry);
- return 1;
+ return status;
}
int nf_queue(struct sk_buff *skb,
@@ -201,6 +212,8 @@ int nf_queue(struct sk_buff *skb,
unsigned int queuenum)
{
struct sk_buff *segs;
+ int err;
+ unsigned int queued;
if (!skb_is_gso(skb))
return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
@@ -216,20 +229,35 @@ int nf_queue(struct sk_buff *skb,
}
segs = skb_gso_segment(skb, 0);
- kfree_skb(skb);
+ /* Does not use PTR_ERR to limit the number of error codes that can be
+ * returned by nf_queue. For instance, callers rely on -ECANCELED to mean
+ * 'ignore this hook'.
+ */
if (IS_ERR(segs))
- return 1;
+ return -EINVAL;
+ queued = 0;
+ err = 0;
do {
struct sk_buff *nskb = segs->next;
segs->next = NULL;
- if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
- queuenum))
+ if (err == 0)
+ err = __nf_queue(segs, elem, pf, hook, indev,
+ outdev, okfn, queuenum);
+ if (err == 0)
+ queued++;
+ else
kfree_skb(segs);
segs = nskb;
} while (segs);
- return 1;
+
+ /* also free orig skb if only some segments were queued */
+ if (unlikely(err && queued))
+ err = 0;
+ if (err == 0)
+ kfree_skb(skb);
+ return err;
}
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
@@ -237,6 +265,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
struct sk_buff *skb = entry->skb;
struct list_head *elem = &entry->elem->list;
const struct nf_afinfo *afinfo;
+ int err;
rcu_read_lock();
@@ -270,10 +299,17 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
local_bh_enable();
break;
case NF_QUEUE:
- if (!__nf_queue(skb, elem, entry->pf, entry->hook,
- entry->indev, entry->outdev, entry->okfn,
- verdict >> NF_VERDICT_BITS))
- goto next_hook;
+ err = __nf_queue(skb, elem, entry->pf, entry->hook,
+ entry->indev, entry->outdev, entry->okfn,
+ verdict >> NF_VERDICT_QBITS);
+ if (err < 0) {
+ if (err == -ECANCELED)
+ goto next_hook;
+ if (err == -ESRCH &&
+ (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+ goto next_hook;
+ kfree_skb(skb);
+ }
break;
case NF_STOLEN:
default:
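
Annotation: the nf_queue() rework replaces the old contract (return 1 and always consume the skb) with 0-or-negative-errno, so callers can tell the failure modes apart: -ECANCELED means the hook module is unloading and traversal should simply continue, -ESRCH means no queue handler is registered — fatal unless the verdict carries NF_VERDICT_FLAG_QUEUE_BYPASS — and anything else drops the packet; for GSO skbs the segments are queued one by one and the original skb is freed only once at least one segment has been handed off. The verdict handling in nf_hook_slow() (not part of this hunk) is expected to follow the same shape; a sketch under that assumption:

            case NF_QUEUE:
                    err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
                                   verdict >> NF_VERDICT_QBITS);
                    if (err == -ECANCELED)
                            goto next_hook; /* hook module going away */
                    if (err == -ESRCH &&
                        (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                            goto next_hook; /* no listener, policy says pass */
                    if (err < 0)
                            kfree_skb(skb); /* any other error drops */
                    break;
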
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 6a1572b..91592da 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -874,19 +874,19 @@ static struct hlist_node *get_first(struct iter_state *st)
for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
if (!hlist_empty(&instance_table[st->bucket]))
- return rcu_dereference_bh(instance_table[st->bucket].first);
+ return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
}
return NULL;
}
static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
{
- h = rcu_dereference_bh(h->next);
+ h = rcu_dereference_bh(hlist_next_rcu(h));
while (!h) {
if (++st->bucket >= INSTANCE_BUCKETS)
return NULL;
- h = rcu_dereference_bh(instance_table[st->bucket].first);
+ h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
}
return h;
}
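
This hunk is purely an annotation fix: the open-coded loads of ->first and ->next are wrapped in the hlist_first_rcu()/hlist_next_rcu() accessors so that sparse's __rcu address-space checking sees an annotated pointer; the generated code should be unchanged. For reference, the accessors are roughly (per include/linux/rculist.h):

#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))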
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 68e67d1..b83123f 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -387,25 +387,31 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
struct sk_buff *nskb;
struct nfqnl_instance *queue;
- int err;
+ int err = -ENOBUFS;
/* rcu_read_lock()ed by nf_hook_slow() */
queue = instance_lookup(queuenum);
- if (!queue)
+ if (!queue) {
+ err = -ESRCH;
goto err_out;
+ }
- if (queue->copy_mode == NFQNL_COPY_NONE)
+ if (queue->copy_mode == NFQNL_COPY_NONE) {
+ err = -EINVAL;
goto err_out;
+ }
nskb = nfqnl_build_packet_message(queue, entry);
- if (nskb == NULL)
+ if (nskb == NULL) {
+ err = -ENOMEM;
goto err_out;
-
+ }
spin_lock_bh(&queue->lock);
- if (!queue->peer_pid)
+ if (!queue->peer_pid) {
+ err = -EINVAL;
goto err_out_free_nskb;
-
+ }
if (queue->queue_total >= queue->queue_maxlen) {
queue->queue_dropped++;
if (net_ratelimit())
@@ -432,7 +438,7 @@ err_out_free_nskb:
err_out_unlock:
spin_unlock_bh(&queue->lock);
err_out:
- return -1;
+ return err;
}
static int
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c942376..0a77d2f 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/audit.h>
#include <net/net_namespace.h>
#include <linux/netfilter/x_tables.h>
@@ -38,9 +39,8 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
struct compat_delta {
- struct compat_delta *next;
- unsigned int offset;
- int delta;
+ unsigned int offset; /* offset in kernel */
+ int delta; /* delta in 32-bit userland */
};
struct xt_af {
@@ -49,7 +49,9 @@ struct xt_af {
struct list_head target;
#ifdef CONFIG_COMPAT
struct mutex compat_mutex;
- struct compat_delta *compat_offsets;
+ struct compat_delta *compat_tab;
+ unsigned int number; /* number of slots in compat_tab[] */
+ unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};
@@ -414,54 +416,67 @@ int xt_check_match(struct xt_mtchk_param *par,
EXPORT_SYMBOL_GPL(xt_check_match);
#ifdef CONFIG_COMPAT
-int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
- struct compat_delta *tmp;
+ struct xt_af *xp = &xt[af];
- tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
+ if (!xp->compat_tab) {
+ if (!xp->number)
+ return -EINVAL;
+ xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
+ if (!xp->compat_tab)
+ return -ENOMEM;
+ xp->cur = 0;
+ }
- tmp->offset = offset;
- tmp->delta = delta;
+ if (xp->cur >= xp->number)
+ return -EINVAL;
- if (xt[af].compat_offsets) {
- tmp->next = xt[af].compat_offsets->next;
- xt[af].compat_offsets->next = tmp;
- } else {
- xt[af].compat_offsets = tmp;
- tmp->next = NULL;
- }
+ if (xp->cur)
+ delta += xp->compat_tab[xp->cur - 1].delta;
+ xp->compat_tab[xp->cur].offset = offset;
+ xp->compat_tab[xp->cur].delta = delta;
+ xp->cur++;
return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);
void xt_compat_flush_offsets(u_int8_t af)
{
- struct compat_delta *tmp, *next;
-
- if (xt[af].compat_offsets) {
- for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
- next = tmp->next;
- kfree(tmp);
- }
- xt[af].compat_offsets = NULL;
+ if (xt[af].compat_tab) {
+ vfree(xt[af].compat_tab);
+ xt[af].compat_tab = NULL;
+ xt[af].number = 0;
}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
- struct compat_delta *tmp;
- int delta;
-
- for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
- if (tmp->offset < offset)
- delta += tmp->delta;
- return delta;
+ struct compat_delta *tmp = xt[af].compat_tab;
+ int mid, left = 0, right = xt[af].cur - 1;
+
+ while (left <= right) {
+ mid = (left + right) >> 1;
+ if (offset > tmp[mid].offset)
+ left = mid + 1;
+ else if (offset < tmp[mid].offset)
+ right = mid - 1;
+ else
+ return mid ? tmp[mid - 1].delta : 0;
+ }
+ WARN_ON_ONCE(1);
+ return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number)
+{
+ xt[af].number = number;
+ xt[af].cur = 0;
+}
+EXPORT_SYMBOL(xt_compat_init_offsets);
+
int xt_compat_match_offset(const struct xt_match *match)
{
u_int16_t csize = match->compatsize ? : match->matchsize;
@@ -820,6 +835,21 @@ xt_replace_table(struct xt_table *table,
*/
local_bh_enable();
+#ifdef CONFIG_AUDIT
+ if (audit_enabled) {
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ AUDIT_NETFILTER_CFG);
+ if (ab) {
+ audit_log_format(ab, "table=%s family=%u entries=%u",
+ table->name, table->af,
+ private->number);
+ audit_log_end(ab);
+ }
+ }
+#endif
+
return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
@@ -1338,7 +1368,7 @@ static int __init xt_init(void)
mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
mutex_init(&xt[i].compat_mutex);
- xt[i].compat_offsets = NULL;
+ xt[i].compat_tab = NULL;
#endif
INIT_LIST_HEAD(&xt[i].target);
INIT_LIST_HEAD(&xt[i].match);
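
The compat bookkeeping above moves from an unsorted singly linked list, walked in full for every jump calculation, to a flat array filled in ascending offset order with cumulative deltas, so xt_compat_calc_jump() becomes a binary search. A stand-alone model of the lookup, assuming a table built the way xt_compat_add_offset() builds it:

#include <stddef.h>

struct compat_delta {
	unsigned int offset;	/* offset in kernel */
	int delta;		/* cumulative 32-bit userland delta */
};

/* Mirror of the new xt_compat_calc_jump(): find the entry for
 * 'offset' and return the delta accumulated *before* it. */
static int calc_jump(const struct compat_delta *tab, unsigned int n,
		     unsigned int offset)
{
	int left = 0, right = (int)n - 1;

	while (left <= right) {
		int mid = (left + right) >> 1;

		if (offset > tab[mid].offset)
			left = mid + 1;
		else if (offset < tab[mid].offset)
			right = mid - 1;
		else
			return mid ? tab[mid - 1].delta : 0;
	}
	return 0;	/* the kernel WARNs here: the offset must exist */
}

Callers announce the entry count up front through the new xt_compat_init_offsets(), which is why xt_compat_add_offset() can allocate the whole table once and refuse to grow past xp->number.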
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
new file mode 100644
index 0000000..81802d2
--- /dev/null
+++ b/net/netfilter/xt_AUDIT.c
@@ -0,0 +1,204 @@
+/*
+ * Creates audit record for dropped/accepted packets
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/audit.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_AUDIT.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>");
+MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets");
+MODULE_ALIAS("ipt_AUDIT");
+MODULE_ALIAS("ip6t_AUDIT");
+MODULE_ALIAS("ebt_AUDIT");
+MODULE_ALIAS("arpt_AUDIT");
+
+static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb,
+ unsigned int proto, unsigned int offset)
+{
+ switch (proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE: {
+ const __be16 *pptr;
+ __be16 _ports[2];
+
+ pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports);
+ if (pptr == NULL) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ audit_log_format(ab, " sport=%hu dport=%hu",
+ ntohs(pptr[0]), ntohs(pptr[1]));
+ }
+ break;
+
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6: {
+ const u8 *iptr;
+ u8 _ih[2];
+
+ iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih);
+ if (iptr == NULL) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu",
+ iptr[0], iptr[1]);
+
+ }
+ break;
+ }
+}
+
+static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
+{
+ struct iphdr _iph;
+ const struct iphdr *ih;
+
+ ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+ if (!ih) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu",
+ &ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol);
+
+ if (ntohs(ih->frag_off) & IP_OFFSET) {
+ audit_log_format(ab, " frag=1");
+ return;
+ }
+
+ audit_proto(ab, skb, ih->protocol, ih->ihl * 4);
+}
+
+static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
+{
+ struct ipv6hdr _ip6h;
+ const struct ipv6hdr *ih;
+ u8 nexthdr;
+ int offset;
+
+ ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
+ if (!ih) {
+ audit_log_format(ab, " truncated=1");
+ return;
+ }
+
+ nexthdr = ih->nexthdr;
+ offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
+ &nexthdr);
+
+ audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
+ &ih->saddr, &ih->daddr, nexthdr);
+
+ if (offset)
+ audit_proto(ab, skb, nexthdr, offset);
+}
+
+static unsigned int
+audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_audit_info *info = par->targinfo;
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
+ if (ab == NULL)
+ goto errout;
+
+ audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s",
+ info->type, par->hooknum, skb->len,
+ par->in ? par->in->name : "?",
+ par->out ? par->out->name : "?");
+
+ if (skb->mark)
+ audit_log_format(ab, " mark=%#x", skb->mark);
+
+ if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
+ audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ ntohs(eth_hdr(skb)->h_proto));
+
+ if (par->family == NFPROTO_BRIDGE) {
+ switch (eth_hdr(skb)->h_proto) {
+ case __constant_htons(ETH_P_IP):
+ audit_ip4(ab, skb);
+ break;
+
+ case __constant_htons(ETH_P_IPV6):
+ audit_ip6(ab, skb);
+ break;
+ }
+ }
+ }
+
+ switch (par->family) {
+ case NFPROTO_IPV4:
+ audit_ip4(ab, skb);
+ break;
+
+ case NFPROTO_IPV6:
+ audit_ip6(ab, skb);
+ break;
+ }
+
+ audit_log_end(ab);
+
+errout:
+ return XT_CONTINUE;
+}
+
+static int audit_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_audit_info *info = par->targinfo;
+
+ if (info->type > XT_AUDIT_TYPE_MAX) {
+ pr_info("Audit type out of range (valid range: 0..%hhu)\n",
+ XT_AUDIT_TYPE_MAX);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static struct xt_target audit_tg_reg __read_mostly = {
+ .name = "AUDIT",
+ .family = NFPROTO_UNSPEC,
+ .target = audit_tg,
+ .targetsize = sizeof(struct xt_audit_info),
+ .checkentry = audit_tg_check,
+ .me = THIS_MODULE,
+};
+
+static int __init audit_tg_init(void)
+{
+ return xt_register_target(&audit_tg_reg);
+}
+
+static void __exit audit_tg_exit(void)
+{
+ xt_unregister_target(&audit_tg_reg);
+}
+
+module_init(audit_tg_init);
+module_exit(audit_tg_exit);
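
With the matching userspace patch, the new target is used like any other, e.g. iptables -A INPUT -p tcp --dport 23 -j AUDIT --type drop; each matched packet then emits an AUDIT_NETFILTER_PKT record whose fields mirror audit_tg() above, along these purely illustrative lines:

  action=1 hook=1 len=60 inif=eth0 outif=? saddr=10.0.0.5 daddr=10.0.0.1 ipid=4711 proto=6 sport=40112 dport=23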
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index c2c0e4a..af9c4da 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -19,12 +19,14 @@
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CLASSIFY.h>
+#include <linux/netfilter_arp.h>
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: Qdisc classification");
MODULE_ALIAS("ipt_CLASSIFY");
MODULE_ALIAS("ip6t_CLASSIFY");
+MODULE_ALIAS("arpt_CLASSIFY");
static unsigned int
classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
@@ -35,26 +37,36 @@ classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
return XT_CONTINUE;
}
-static struct xt_target classify_tg_reg __read_mostly = {
- .name = "CLASSIFY",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .table = "mangle",
- .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
- (1 << NF_INET_POST_ROUTING),
- .target = classify_tg,
- .targetsize = sizeof(struct xt_classify_target_info),
- .me = THIS_MODULE,
+static struct xt_target classify_tg_reg[] __read_mostly = {
+ {
+ .name = "CLASSIFY",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .hooks = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
+ (1 << NF_INET_POST_ROUTING),
+ .target = classify_tg,
+ .targetsize = sizeof(struct xt_classify_target_info),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "CLASSIFY",
+ .revision = 0,
+ .family = NFPROTO_ARP,
+ .hooks = (1 << NF_ARP_OUT) | (1 << NF_ARP_FORWARD),
+ .target = classify_tg,
+ .targetsize = sizeof(struct xt_classify_target_info),
+ .me = THIS_MODULE,
+ },
};
static int __init classify_tg_init(void)
{
- return xt_register_target(&classify_tg_reg);
+ return xt_register_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
}
static void __exit classify_tg_exit(void)
{
- xt_unregister_target(&classify_tg_reg);
+ xt_unregister_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
}
module_init(classify_tg_init);
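
The practical upshot: once arptables grows matching userspace support, ARP traffic can be steered into qdisc classes the same way IP traffic already can, e.g. (illustrative) arptables -A OUTPUT -o eth0 -j CLASSIFY --set-class 1:4. Note that the NFPROTO_ARP registration only hooks NF_ARP_OUT and NF_ARP_FORWARD, since skb->priority only matters on egress.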
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index be1f22e..3bdd443 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -313,3 +313,5 @@ MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
MODULE_DESCRIPTION("Xtables: idle time monitor");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ipt_IDLETIMER");
+MODULE_ALIAS("ip6t_IDLETIMER");
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index a414050..993de2b 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -31,6 +31,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
+MODULE_ALIAS("ipt_LED");
+MODULE_ALIAS("ip6t_LED");
static LIST_HEAD(xt_led_triggers);
static DEFINE_MUTEX(xt_led_mutex);
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 039cce1..d4f4b5d 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -72,18 +72,31 @@ nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
if (info->queues_total > 1) {
if (par->family == NFPROTO_IPV4)
- queue = hash_v4(skb) % info->queues_total + queue;
+ queue = (((u64) hash_v4(skb) * info->queues_total) >>
+ 32) + queue;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
else if (par->family == NFPROTO_IPV6)
- queue = hash_v6(skb) % info->queues_total + queue;
+ queue = (((u64) hash_v6(skb) * info->queues_total) >>
+ 32) + queue;
#endif
}
return NF_QUEUE_NR(queue);
}
-static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
+static unsigned int
+nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
- const struct xt_NFQ_info_v1 *info = par->targinfo;
+ const struct xt_NFQ_info_v2 *info = par->targinfo;
+ unsigned int ret = nfqueue_tg_v1(skb, par);
+
+ if (info->bypass)
+ ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+ return ret;
+}
+
+static int nfqueue_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_NFQ_info_v2 *info = par->targinfo;
u32 maxid;
if (unlikely(!rnd_inited)) {
@@ -100,6 +113,8 @@ static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
info->queues_total, maxid);
return -ERANGE;
}
+ if (par->target->revision == 2 && info->bypass > 1)
+ return -EINVAL;
return 0;
}
@@ -115,11 +130,20 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
.name = "NFQUEUE",
.revision = 1,
.family = NFPROTO_UNSPEC,
- .checkentry = nfqueue_tg_v1_check,
+ .checkentry = nfqueue_tg_check,
.target = nfqueue_tg_v1,
.targetsize = sizeof(struct xt_NFQ_info_v1),
.me = THIS_MODULE,
},
+ {
+ .name = "NFQUEUE",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = nfqueue_tg_check,
+ .target = nfqueue_tg_v2,
+ .targetsize = sizeof(struct xt_NFQ_info_v2),
+ .me = THIS_MODULE,
+ },
};
static int __init nfqueue_tg_init(void)
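
Two separate improvements here: the flow hash is mapped onto the queue range with a multiply-shift instead of a modulo, and revision 2 adds a bypass flag that is simply ORed into the verdict for nf_reinject() to honour. A stand-alone demo of the queue mapping (the main() is only for illustration):

#include <stdint.h>
#include <stdio.h>

/* ((u64)hash * n) >> 32 maps a uniform 32-bit hash uniformly onto
 * [0, n) without the division implied by '%'. */
static unsigned int pick_queue(uint32_t hash, unsigned int n_queues)
{
	return (unsigned int)(((uint64_t)hash * n_queues) >> 32);
}

int main(void)
{
	printf("%u\n", pick_queue(0xdeadbeefu, 8));	/* always < 8 */
	return 0;
}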
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 5c5b6b9..e029c48 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -185,18 +185,24 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
int connections;
ct = nf_ct_get(skb, &ctinfo);
- if (ct != NULL)
- tuple_ptr = &ct->tuplehash[0].tuple;
- else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
- par->family, &tuple))
+ if (ct != NULL) {
+ if (info->flags & XT_CONNLIMIT_DADDR)
+ tuple_ptr = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+ else
+ tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ } else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+ par->family, &tuple)) {
goto hotdrop;
+ }
if (par->family == NFPROTO_IPV6) {
const struct ipv6hdr *iph = ipv6_hdr(skb);
- memcpy(&addr.ip6, &iph->saddr, sizeof(iph->saddr));
+ memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
+ &iph->daddr : &iph->saddr, sizeof(addr.ip6));
} else {
const struct iphdr *iph = ip_hdr(skb);
- addr.ip = iph->saddr;
+ addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
+ iph->daddr : iph->saddr;
}
spin_lock_bh(&info->data->lock);
@@ -204,13 +210,12 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
&info->mask, par->family);
spin_unlock_bh(&info->data->lock);
- if (connections < 0) {
+ if (connections < 0)
/* kmalloc failed, drop it entirely */
- par->hotdrop = true;
- return false;
- }
+ goto hotdrop;
- return (connections > info->limit) ^ info->inverse;
+ return (connections > info->limit) ^
+ !!(info->flags & XT_CONNLIMIT_INVERT);
hotdrop:
par->hotdrop = true;
@@ -268,25 +273,38 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
kfree(info->data);
}
-static struct xt_match connlimit_mt_reg __read_mostly = {
- .name = "connlimit",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = connlimit_mt_check,
- .match = connlimit_mt,
- .matchsize = sizeof(struct xt_connlimit_info),
- .destroy = connlimit_mt_destroy,
- .me = THIS_MODULE,
+static struct xt_match connlimit_mt_reg[] __read_mostly = {
+ {
+ .name = "connlimit",
+ .revision = 0,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "connlimit",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
+ },
};
static int __init connlimit_mt_init(void)
{
- return xt_register_match(&connlimit_mt_reg);
+ return xt_register_matches(connlimit_mt_reg,
+ ARRAY_SIZE(connlimit_mt_reg));
}
static void __exit connlimit_mt_exit(void)
{
- xt_unregister_match(&connlimit_mt_reg);
+ xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
}
module_init(connlimit_mt_init);
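
Revision 1 makes the limit applicable to destination addresses: the match then counts against the reply-direction tuple and iph->daddr rather than the originating source. With the contemporaneous iptables support this would look something like (flag name per the userspace patches, so treat as illustrative): iptables -A INPUT -p tcp --syn --dport 80 -m connlimit --connlimit-above 16 --connlimit-daddr -j DROP.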
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index e536710..4ef1b63 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -112,6 +112,54 @@ ct_proto_port_check(const struct xt_conntrack_mtinfo2 *info,
return true;
}
+static inline bool
+port_match(u16 min, u16 max, u16 port, bool invert)
+{
+ return (port >= min && port <= max) ^ invert;
+}
+
+static inline bool
+ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info,
+ const struct nf_conn *ct)
+{
+ const struct nf_conntrack_tuple *tuple;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ if ((info->match_flags & XT_CONNTRACK_PROTO) &&
+ (nf_ct_protonum(ct) == info->l4proto) ^
+ !(info->invert_flags & XT_CONNTRACK_PROTO))
+ return false;
+
+ /* Shortcut to match all recognized protocols by using ->src.all. */
+ if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
+ !port_match(info->origsrc_port, info->origsrc_port_high,
+ ntohs(tuple->src.u.all),
+ info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
+ return false;
+
+ if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
+ !port_match(info->origdst_port, info->origdst_port_high,
+ ntohs(tuple->dst.u.all),
+ info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
+ return false;
+
+ tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+ if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
+ !port_match(info->replsrc_port, info->replsrc_port_high,
+ ntohs(tuple->src.u.all),
+ info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
+ return false;
+
+ if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
+ !port_match(info->repldst_port, info->repldst_port_high,
+ ntohs(tuple->dst.u.all),
+ info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
+ return false;
+
+ return true;
+}
+
static bool
conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
u16 state_mask, u16 status_mask)
@@ -170,8 +218,13 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
!(info->invert_flags & XT_CONNTRACK_REPLDST))
return false;
- if (!ct_proto_port_check(info, ct))
- return false;
+ if (par->match->revision != 3) {
+ if (!ct_proto_port_check(info, ct))
+ return false;
+ } else {
+ if (!ct_proto_port_check_v3(par->matchinfo, ct))
+ return false;
+ }
if ((info->match_flags & XT_CONNTRACK_STATUS) &&
(!!(status_mask & ct->status) ^
@@ -207,6 +260,14 @@ conntrack_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
return conntrack_mt(skb, par, info->state_mask, info->status_mask);
}
+static bool
+conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_conntrack_mtinfo3 *info = par->matchinfo;
+
+ return conntrack_mt(skb, par, info->state_mask, info->status_mask);
+}
+
static int conntrack_mt_check(const struct xt_mtchk_param *par)
{
int ret;
@@ -244,6 +305,16 @@ static struct xt_match conntrack_mt_reg[] __read_mostly = {
.destroy = conntrack_mt_destroy,
.me = THIS_MODULE,
},
+ {
+ .name = "conntrack",
+ .revision = 3,
+ .family = NFPROTO_UNSPEC,
+ .matchsize = sizeof(struct xt_conntrack_mtinfo3),
+ .match = conntrack_mt_v3,
+ .checkentry = conntrack_mt_check,
+ .destroy = conntrack_mt_destroy,
+ .me = THIS_MODULE,
+ },
};
static int __init conntrack_mt_init(void)
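
Revision 3 only widens the four port fields of the match info into [port, port_high] ranges; state_mask and status_mask keep their offsets, which is why conntrack_mt() can keep reading them through the older info pointer and merely switch the port check on par->match->revision. With matching userspace, a range would be written along the lines of --ctorigsrcport 32768:61000 (illustrative syntax).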
diff --git a/net/netfilter/xt_cpu.c b/net/netfilter/xt_cpu.c
index b39db8a..c7a2e54 100644
--- a/net/netfilter/xt_cpu.c
+++ b/net/netfilter/xt_cpu.c
@@ -22,6 +22,8 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Dumazet <eric.dumazet@gmail.com>");
MODULE_DESCRIPTION("Xtables: CPU match");
+MODULE_ALIAS("ipt_cpu");
+MODULE_ALIAS("ip6t_cpu");
static int cpu_mt_check(const struct xt_mtchk_param *par)
{
diff --git a/net/netfilter/xt_devgroup.c b/net/netfilter/xt_devgroup.c
new file mode 100644
index 0000000..d9202cd
--- /dev/null
+++ b/net/netfilter/xt_devgroup.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <linux/netfilter/xt_devgroup.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: Device group match");
+MODULE_ALIAS("ipt_devgroup");
+MODULE_ALIAS("ip6t_devgroup");
+
+static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_devgroup_info *info = par->matchinfo;
+
+ if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+ (((info->src_group ^ par->in->group) & info->src_mask ? 1 : 0) ^
+ ((info->flags & XT_DEVGROUP_INVERT_SRC) ? 1 : 0)))
+ return false;
+
+ if (info->flags & XT_DEVGROUP_MATCH_DST &&
+ (((info->dst_group ^ par->out->group) & info->dst_mask ? 1 : 0) ^
+ ((info->flags & XT_DEVGROUP_INVERT_DST) ? 1 : 0)))
+ return false;
+
+ return true;
+}
+
+static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
+{
+ const struct xt_devgroup_info *info = par->matchinfo;
+
+ if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC |
+ XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST))
+ return -EINVAL;
+
+ if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+ par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD)))
+ return -EINVAL;
+
+ if (info->flags & XT_DEVGROUP_MATCH_DST &&
+ par->hook_mask & ~((1 << NF_INET_FORWARD) |
+ (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct xt_match devgroup_mt_reg __read_mostly = {
+ .name = "devgroup",
+ .match = devgroup_mt,
+ .checkentry = devgroup_mt_checkentry,
+ .matchsize = sizeof(struct xt_devgroup_info),
+ .family = NFPROTO_UNSPEC,
+ .me = THIS_MODULE
+};
+
+static int __init devgroup_mt_init(void)
+{
+ return xt_register_match(&devgroup_mt_reg);
+}
+
+static void __exit devgroup_mt_exit(void)
+{
+ xt_unregister_match(&devgroup_mt_reg);
+}
+
+module_init(devgroup_mt_init);
+module_exit(devgroup_mt_exit);
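
Interface groups themselves are assigned through the new net_device group attribute (with iproute2 support, something like ip link set dev eth1 group 1). The test is a masked XOR with optional inversion; a stand-alone model:

#include <stdbool.h>
#include <stdint.h>

/* A device matches when its group equals the configured group under
 * the mask, XORed with the invert flag. */
static bool group_matches(uint32_t dev_group, uint32_t want,
			  uint32_t mask, bool invert)
{
	bool differs = ((dev_group ^ want) & mask) != 0;

	return differs == invert;
}

The checkentry hook additionally rejects hook placements where par->in or par->out would be NULL, since devgroup_mt() dereferences them unconditionally.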
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 73c33a4..b46626c 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -31,7 +31,7 @@ iprange_mt4(const struct sk_buff *skb, struct xt_action_param *par)
pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n",
&iph->saddr,
(info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
- &info->src_max.ip,
+ &info->src_min.ip,
&info->src_max.ip);
return false;
}
@@ -76,15 +76,27 @@ iprange_mt6(const struct sk_buff *skb, struct xt_action_param *par)
m = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
m ^= !!(info->flags & IPRANGE_SRC_INV);
- if (m)
+ if (m) {
+ pr_debug("src IP %pI6 NOT in range %s%pI6-%pI6\n",
+ &iph->saddr,
+ (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
+ &info->src_min.in6,
+ &info->src_max.in6);
return false;
+ }
}
if (info->flags & IPRANGE_DST) {
m = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
m ^= !!(info->flags & IPRANGE_DST_INV);
- if (m)
+ if (m) {
+ pr_debug("dst IP %pI6 NOT in range %s%pI6-%pI6\n",
+ &iph->daddr,
+ (info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
+ &info->dst_min.in6,
+ &info->dst_max.in6);
return false;
+ }
}
return true;
}
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index 9127a3d..bb10b07 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -85,7 +85,7 @@ ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
/*
* Check if the packet belongs to an existing entry
*/
- cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */);
+ cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */);
if (unlikely(cp == NULL)) {
match = false;
goto out;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
new file mode 100644
index 0000000..061d48c
--- /dev/null
+++ b/net/netfilter/xt_set.c
@@ -0,0 +1,359 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ * Patrick Schaaf <bof@bof.de>
+ * Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module which implements the set match and SET target
+ * for netfilter/iptables. */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_set.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("Xtables: IP set match and target module");
+MODULE_ALIAS("xt_SET");
+MODULE_ALIAS("ipt_set");
+MODULE_ALIAS("ip6t_set");
+MODULE_ALIAS("ipt_SET");
+MODULE_ALIAS("ip6t_SET");
+
+static inline int
+match_set(ip_set_id_t index, const struct sk_buff *skb,
+ u8 pf, u8 dim, u8 flags, int inv)
+{
+ if (ip_set_test(index, skb, pf, dim, flags))
+ inv = !inv;
+ return inv;
+}
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+static bool
+set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match_v0 *info = par->matchinfo;
+
+ return match_set(info->match_set.index, skb, par->family,
+ info->match_set.u.compat.dim,
+ info->match_set.u.compat.flags,
+ info->match_set.u.compat.flags & IPSET_INV_MATCH);
+}
+
+static void
+compat_flags(struct xt_set_info_v0 *info)
+{
+ u_int8_t i;
+
+ /* Fill out compatibility data according to enum ip_set_kopt */
+ info->u.compat.dim = IPSET_DIM_ZERO;
+ if (info->u.flags[0] & IPSET_MATCH_INV)
+ info->u.compat.flags |= IPSET_INV_MATCH;
+ for (i = 0; i < IPSET_DIM_MAX-1 && info->u.flags[i]; i++) {
+ info->u.compat.dim++;
+ if (info->u.flags[i] & IPSET_SRC)
+ info->u.compat.flags |= (1<<info->u.compat.dim);
+ }
+}
+
+static int
+set_match_v0_checkentry(const struct xt_mtchk_param *par)
+{
+ struct xt_set_info_match_v0 *info = par->matchinfo;
+ ip_set_id_t index;
+
+ index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find set indentified by id %u to match\n",
+ info->match_set.index);
+ return -ENOENT;
+ }
+ if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+ pr_warning("Protocol error: set match dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ /* Fill out compatibility data */
+ compat_flags(&info->match_set);
+
+ return 0;
+}
+
+static void
+set_match_v0_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_set_info_match_v0 *info = par->matchinfo;
+
+ ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_set_info_target_v0 *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_add(info->add_set.index, skb, par->family,
+ info->add_set.u.compat.dim,
+ info->add_set.u.compat.flags);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_del(info->del_set.index, skb, par->family,
+ info->del_set.u.compat.dim,
+ info->del_set.u.compat.flags);
+
+ return XT_CONTINUE;
+}
+
+static int
+set_target_v0_checkentry(const struct xt_tgchk_param *par)
+{
+ struct xt_set_info_target_v0 *info = par->targinfo;
+ ip_set_id_t index;
+
+ if (info->add_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->add_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find add_set index %u as target\n",
+ info->add_set.index);
+ return -ENOENT;
+ }
+ }
+
+ if (info->del_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->del_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find del_set index %u as target\n",
+ info->del_set.index);
+ return -ENOENT;
+ }
+ }
+ if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
+ info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+ pr_warning("Protocol error: SET target dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ /* Fill out compatibility data */
+ compat_flags(&info->add_set);
+ compat_flags(&info->del_set);
+
+ return 0;
+}
+
+static void
+set_target_v0_destroy(const struct xt_tgdtor_param *par)
+{
+ const struct xt_set_info_target_v0 *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
+}
+
+/* Revision 1: current interface to netfilter/iptables */
+
+static bool
+set_match(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ const struct xt_set_info_match *info = par->matchinfo;
+
+ return match_set(info->match_set.index, skb, par->family,
+ info->match_set.dim,
+ info->match_set.flags,
+ info->match_set.flags & IPSET_INV_MATCH);
+}
+
+static int
+set_match_checkentry(const struct xt_mtchk_param *par)
+{
+ struct xt_set_info_match *info = par->matchinfo;
+ ip_set_id_t index;
+
+ index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find set indentified by id %u to match\n",
+ info->match_set.index);
+ return -ENOENT;
+ }
+ if (info->match_set.dim > IPSET_DIM_MAX) {
+ pr_warning("Protocol error: set match dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void
+set_match_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_set_info_match *info = par->matchinfo;
+
+ ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_set_info_target *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_add(info->add_set.index,
+ skb, par->family,
+ info->add_set.dim,
+ info->add_set.flags);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_del(info->del_set.index,
+ skb, par->family,
+ info->del_set.dim,
+ info->del_set.flags);
+
+ return XT_CONTINUE;
+}
+
+static int
+set_target_checkentry(const struct xt_tgchk_param *par)
+{
+ const struct xt_set_info_target *info = par->targinfo;
+ ip_set_id_t index;
+
+ if (info->add_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->add_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find add_set index %u as target\n",
+ info->add_set.index);
+ return -ENOENT;
+ }
+ }
+
+ if (info->del_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(info->del_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warning("Cannot find del_set index %u as target\n",
+ info->del_set.index);
+ return -ENOENT;
+ }
+ }
+ if (info->add_set.dim > IPSET_DIM_MAX ||
+ info->del_set.dim > IPSET_DIM_MAX) {
+ pr_warning("Protocol error: SET target dimension "
+ "is over the limit!\n");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void
+set_target_destroy(const struct xt_tgdtor_param *par)
+{
+ const struct xt_set_info_target *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(info->del_set.index);
+}
+
+static struct xt_match set_matches[] __read_mostly = {
+ {
+ .name = "set",
+ .family = NFPROTO_IPV4,
+ .revision = 0,
+ .match = set_match_v0,
+ .matchsize = sizeof(struct xt_set_info_match_v0),
+ .checkentry = set_match_v0_checkentry,
+ .destroy = set_match_v0_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "set",
+ .family = NFPROTO_IPV4,
+ .revision = 1,
+ .match = set_match,
+ .matchsize = sizeof(struct xt_set_info_match),
+ .checkentry = set_match_checkentry,
+ .destroy = set_match_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "set",
+ .family = NFPROTO_IPV6,
+ .revision = 1,
+ .match = set_match,
+ .matchsize = sizeof(struct xt_set_info_match),
+ .checkentry = set_match_checkentry,
+ .destroy = set_match_destroy,
+ .me = THIS_MODULE
+ },
+};
+
+static struct xt_target set_targets[] __read_mostly = {
+ {
+ .name = "SET",
+ .revision = 0,
+ .family = NFPROTO_IPV4,
+ .target = set_target_v0,
+ .targetsize = sizeof(struct xt_set_info_target_v0),
+ .checkentry = set_target_v0_checkentry,
+ .destroy = set_target_v0_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "SET",
+ .revision = 1,
+ .family = NFPROTO_IPV4,
+ .target = set_target,
+ .targetsize = sizeof(struct xt_set_info_target),
+ .checkentry = set_target_checkentry,
+ .destroy = set_target_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "SET",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .target = set_target,
+ .targetsize = sizeof(struct xt_set_info_target),
+ .checkentry = set_target_checkentry,
+ .destroy = set_target_destroy,
+ .me = THIS_MODULE
+ },
+};
+
+static int __init xt_set_init(void)
+{
+ int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches));
+
+ if (!ret) {
+ ret = xt_register_targets(set_targets,
+ ARRAY_SIZE(set_targets));
+ if (ret)
+ xt_unregister_matches(set_matches,
+ ARRAY_SIZE(set_matches));
+ }
+ return ret;
+}
+
+static void __exit xt_set_fini(void)
+{
+ xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches));
+ xt_unregister_targets(set_targets, ARRAY_SIZE(set_targets));
+}
+
+module_init(xt_set_init);
+module_exit(xt_set_fini);
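
Together with the in-kernel ip_set core merged in the same series, this supersedes the old out-of-tree ipset modules; revision 0 keeps binary compatibility with the legacy tools, while revision 1 is what ipset 6.x userspace speaks. Typical use (assuming ipset 6.x):

  ipset create blacklist hash:ip
  iptables -A INPUT -m set --match-set blacklist src -j DROP
  iptables -A OUTPUT -p tcp --dport 25 -j SET --add-set blacklist dst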
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 91cb1d7..5efef5b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -164,7 +164,6 @@ struct packet_mreq_max {
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
int closing, int tx_ring);
-#define PGV_FROM_VMALLOC 1
struct pgv {
char *buffer;
};
@@ -466,7 +465,7 @@ retry:
*/
err = -EMSGSIZE;
- if (len > dev->mtu + dev->hard_header_len)
+ if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
goto out_unlock;
if (!skb) {
@@ -497,6 +496,19 @@ retry:
goto retry;
}
+ if (len > (dev->mtu + dev->hard_header_len)) {
+ /* Earlier code assumed this would be a VLAN pkt,
+ * double-check this now that we have the actual
+ * packet in hand.
+ */
+ struct ethhdr *ehdr;
+ skb_reset_mac_header(skb);
+ ehdr = eth_hdr(skb);
+ if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+ err = -EMSGSIZE;
+ goto out_unlock;
+ }
+ }
skb->protocol = proto;
skb->dev = dev;
@@ -523,11 +535,11 @@ static inline unsigned int run_filter(const struct sk_buff *skb,
{
struct sk_filter *filter;
- rcu_read_lock_bh();
- filter = rcu_dereference_bh(sk->sk_filter);
+ rcu_read_lock();
+ filter = rcu_dereference(sk->sk_filter);
if (filter != NULL)
res = sk_run_filter(skb, filter->insns);
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return res;
}
@@ -1200,7 +1212,7 @@ static int packet_snd(struct socket *sock,
}
err = -EMSGSIZE;
- if (!gso_type && (len > dev->mtu+reserve))
+ if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
goto out_unlock;
err = -ENOBUFS;
@@ -1225,6 +1237,20 @@ static int packet_snd(struct socket *sock,
if (err < 0)
goto out_free;
+ if (!gso_type && (len > dev->mtu + reserve)) {
+ /* Earlier code assumed this would be a VLAN pkt,
+ * double-check this now that we have the actual
+ * packet in hand.
+ */
+ struct ethhdr *ehdr;
+ skb_reset_mac_header(skb);
+ ehdr = eth_hdr(skb);
+ if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+ err = -EMSGSIZE;
+ goto out_free;
+ }
+ }
+
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
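
Both the tpacket and packet_snd paths now use the same two-stage size check: an oversized frame is provisionally allowed an extra VLAN_HLEN up front, then rejected after the copy unless the Ethernet type really is 802.1Q, because the type is not known until the user data has been copied in. A stand-alone model of the logic (helper name hypothetical):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_P_8021Q	0x8100
#define VLAN_HLEN	4

static bool frame_len_ok(size_t len, size_t mtu, size_t hlen,
			 uint16_t h_proto_net)	/* network byte order */
{
	if (len > mtu + hlen + VLAN_HLEN)
		return false;		/* too big even for a VLAN frame */
	if (len > mtu + hlen)		/* only 802.1Q may exceed the MTU */
		return h_proto_net == htons(ETH_P_8021Q);
	return true;
}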
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 9542449..da8adac 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -50,7 +50,6 @@ rdsdebug(char *fmt, ...)
#define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
#define RDS_CONG_MAP_BYTES (65536 / 8)
-#define RDS_CONG_MAP_LONGS (RDS_CONG_MAP_BYTES / sizeof(unsigned long))
#define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d952e7e..5ee0c62 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -803,7 +803,6 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
rose_insert_socket(sk); /* Finish the bind */
}
-rose_try_next_neigh:
rose->dest_addr = addr->srose_addr;
rose->dest_call = addr->srose_call;
rose->rand = ((long)rose & 0xFFFF) + rose->lci;
@@ -865,12 +864,6 @@ rose_try_next_neigh:
}
if (sk->sk_state != TCP_ESTABLISHED) {
- /* Try next neighbour */
- rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
- if (rose->neighbour)
- goto rose_try_next_neigh;
-
- /* No more neighbours */
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index b4fdaac..88a77e9 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -674,29 +674,34 @@ struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neig
* Find a neighbour or a route given a ROSE address.
*/
struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
- unsigned char *diagnostic, int new)
+ unsigned char *diagnostic, int route_frame)
{
struct rose_neigh *res = NULL;
struct rose_node *node;
int failed = 0;
int i;
- if (!new) spin_lock_bh(&rose_node_list_lock);
+ if (!route_frame) spin_lock_bh(&rose_node_list_lock);
for (node = rose_node_list; node != NULL; node = node->next) {
if (rosecmpm(addr, &node->address, node->mask) == 0) {
for (i = 0; i < node->count; i++) {
- if (new) {
- if (node->neighbour[i]->restarted) {
- res = node->neighbour[i];
- goto out;
- }
+ if (node->neighbour[i]->restarted) {
+ res = node->neighbour[i];
+ goto out;
}
- else {
+ }
+ }
+ }
+ if (!route_frame) { /* connect request */
+ for (node = rose_node_list; node != NULL; node = node->next) {
+ if (rosecmpm(addr, &node->address, node->mask) == 0) {
+ for (i = 0; i < node->count; i++) {
if (!rose_ftimer_running(node->neighbour[i])) {
res = node->neighbour[i];
+ failed = 0;
goto out;
- } else
- failed = 1;
+ }
+ failed = 1;
}
}
}
@@ -711,8 +716,7 @@ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
}
out:
- if (!new) spin_unlock_bh(&rose_node_list_lock);
-
+ if (!route_frame) spin_unlock_bh(&rose_node_list_lock);
return res;
}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index f04d4a4..8c19b6e 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -205,6 +205,29 @@ config NET_SCH_DRR
If unsure, say N.
+config NET_SCH_MQPRIO
+ tristate "Multi-queue priority scheduler (MQPRIO)"
+ help
+ Say Y here if you want to use the Multi-queue Priority scheduler.
+ This scheduler allows QoS to be offloaded on NICs that support
+ hardware offloading of QoS schedulers.
+
+ To compile this driver as a module, choose M here: the module will
+ be called sch_mqprio.
+
+ If unsure, say N.
+
+config NET_SCH_CHOKE
+ tristate "CHOose and Keep responsive flow scheduler (CHOKE)"
+ help
+ Say Y here if you want to use the CHOKe packet scheduler (CHOose
+ and Keep for responsive flows, CHOose and Kill for unresponsive
+ flows). This is a variation of RED that tries to penalize flows
+ that monopolize the queue.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_choke.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
@@ -243,7 +266,7 @@ config NET_CLS_TCINDEX
config NET_CLS_ROUTE4
tristate "Routing decision (ROUTE)"
- select NET_CLS_ROUTE
+ select IP_ROUTE_CLASSID
select NET_CLS
---help---
If you say Y here, you will be able to classify packets
@@ -252,9 +275,6 @@ config NET_CLS_ROUTE4
To compile this code as a module, choose M here: the
module will be called cls_route.
-config NET_CLS_ROUTE
- bool
-
config NET_CLS_FW
tristate "Netfilter mark (FW)"
select NET_CLS
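
Assuming tc userspace support for the new qdiscs, mqprio is set up along these lines (illustrative): tc qdisc add dev eth0 root mqprio num_tc 4 map 0 0 1 1 2 2 3 3 queues 2@0 2@2 2@4 2@6 hw 1, mapping priorities to traffic classes and traffic classes to hardware queue ranges; sch_choke takes RED-style parameters (limit, min, max, avpkt, ...).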
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 960f5db..06c6cdf 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -32,6 +32,9 @@ obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o
obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o
obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
+obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
+obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
+
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
obj-$(CONFIG_NET_CLS_FW) += cls_fw.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 23b25f8..15873e1 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -78,7 +78,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
struct tc_action *a, struct tcf_hashinfo *hinfo)
{
struct tcf_common *p;
- int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
+ int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct nlattr *nest;
read_lock_bh(hinfo->lock);
@@ -126,7 +126,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
{
struct tcf_common *p, *s_p;
struct nlattr *nest;
- int i= 0, n_i = 0;
+ int i = 0, n_i = 0;
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
@@ -138,7 +138,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
while (p != NULL) {
s_p = p->tcfc_next;
if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
- module_put(a->ops->owner);
+ module_put(a->ops->owner);
n_i++;
p = s_p;
}
@@ -447,7 +447,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
- if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
+ err = tcf_action_dump_old(skb, a, bind, ref);
+ if (err > 0) {
nla_nest_end(skb, nest);
return err;
}
@@ -491,7 +492,7 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
struct tc_action *a;
struct tc_action_ops *a_o;
char act_name[IFNAMSIZ];
- struct nlattr *tb[TCA_ACT_MAX+1];
+ struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
int err;
@@ -549,9 +550,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
goto err_free;
/* module count goes up only when brand new policy is created
- if it exists and is only bound to in a_o->init() then
- ACT_P_CREATED is not returned (a zero is).
- */
+ * if it exists and is only bound to in a_o->init() then
+ * ACT_P_CREATED is not returned (a zero is).
+ */
if (err != ACT_P_CREATED)
module_put(a_o->owner);
a->ops = a_o;
@@ -569,7 +570,7 @@ err_out:
struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind)
{
- struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+ struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL;
int err;
int i;
@@ -697,7 +698,7 @@ act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
- struct nlattr *tb[TCA_ACT_MAX+1];
+ struct nlattr *tb[TCA_ACT_MAX + 1];
struct tc_action *a;
int index;
int err;
@@ -770,7 +771,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
struct tcamsg *t;
struct netlink_callback dcb;
struct nlattr *nest;
- struct nlattr *tb[TCA_ACT_MAX+1];
+ struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
struct tc_action *a = create_a(0);
int err = -ENOMEM;
@@ -821,7 +822,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
nlh->nlmsg_flags |= NLM_F_ROOT;
module_put(a->ops->owner);
kfree(a);
- err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
if (err > 0)
return 0;
@@ -842,14 +844,14 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
u32 pid, int event)
{
int i, ret;
- struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+ struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL;
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
if (ret < 0)
return ret;
- if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
+ if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
if (tb[1] != NULL)
return tca_action_flush(net, tb[1], n, pid);
else
@@ -892,7 +894,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* now do the delete */
tcf_action_destroy(head, 0);
ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
- n->nlmsg_flags&NLM_F_ECHO);
+ n->nlmsg_flags & NLM_F_ECHO);
if (ret > 0)
return 0;
return ret;
@@ -936,7 +938,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
NETLINK_CB(skb).dst_group = RTNLGRP_TC;
- err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
if (err > 0)
err = 0;
return err;
@@ -967,7 +969,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
/* dump then free all the actions after update; inserted policy
* stays intact
- * */
+ */
ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
for (a = act; a; a = act) {
act = a->next;
@@ -993,8 +995,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL;
}
- /* n->nlmsg_flags&NLM_F_CREATE
- * */
+ /* n->nlmsg_flags & NLM_F_CREATE */
switch (n->nlmsg_type) {
case RTM_NEWACTION:
/* we are going to assume all other flags
@@ -1003,7 +1004,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
* but since we want avoid ambiguity (eg when flags
* is zero) then just set this
*/
- if (n->nlmsg_flags&NLM_F_REPLACE)
+ if (n->nlmsg_flags & NLM_F_REPLACE)
ovr = 1;
replay:
ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
@@ -1028,7 +1029,7 @@ replay:
static struct nlattr *
find_dump_kind(const struct nlmsghdr *n)
{
- struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
+ struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct nlattr *nla[TCAA_MAX + 1];
struct nlattr *kind;
@@ -1071,9 +1072,8 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
}
a_o = tc_lookup_action(kind);
- if (a_o == NULL) {
+ if (a_o == NULL)
return 0;
- }
memset(&a, 0, sizeof(struct tc_action));
a.ops = a_o;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 83ddfc0..6cdf9ab 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -63,7 +63,7 @@ static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
if (nla == NULL)
return -EINVAL;
- err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy);
+ err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
if (err < 0)
return err;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index c2ed90a..2b4ab4b 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -50,7 +50,7 @@ static int gact_determ(struct tcf_gact *gact)
}
typedef int (*g_rand)(struct tcf_gact *gact);
-static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
+static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
#endif /* CONFIG_GACT_PROB */
static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
@@ -89,7 +89,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
bind, &gact_idx_gen, &gact_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
if (!ovr) {
@@ -205,9 +205,9 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void)
{
#ifdef CONFIG_GACT_PROB
- printk(KERN_INFO "GACT probability on\n");
+ pr_info("GACT probability on\n");
#else
- printk(KERN_INFO "GACT probability NOT on\n");
+ pr_info("GACT probability NOT on\n");
#endif
return tcf_register_action(&act_gact_ops);
}
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index c2a7c20..9fc211a 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -138,7 +138,7 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
&ipt_idx_gen, &ipt_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
if (!ovr) {
@@ -162,7 +162,8 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
if (unlikely(!t))
goto err2;
- if ((err = ipt_init_target(t, tname, hook)) < 0)
+ err = ipt_init_target(t, tname, hook);
+ if (err < 0)
goto err3;
spin_lock_bh(&ipt->tcf_lock);
@@ -212,8 +213,9 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev
- worry later - danger - this API seems to have changed
- from earlier kernels */
+ * worry later - danger - this API seems to have changed
+ * from earlier kernels
+ */
par.in = skb->dev;
par.out = NULL;
par.hooknum = ipt->tcfi_hook;
@@ -253,9 +255,9 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
struct tc_cnt c;
/* for simple targets kernel size == user size
- ** user name = target name
- ** for foolproof you need to not assume this
- */
+ * user name = target name
+ * for foolproof you need to not assume this
+ */
t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
if (unlikely(!t))
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index d765067..961386e 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -41,13 +41,13 @@ static struct tcf_hashinfo mirred_hash_info = {
.lock = &mirred_lock,
};
-static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static int tcf_mirred_release(struct tcf_mirred *m, int bind)
{
if (m) {
if (bind)
m->tcf_bindcnt--;
m->tcf_refcnt--;
- if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
+ if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
list_del(&m->tcfm_list);
if (m->tcfm_dev)
dev_put(m->tcfm_dev);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 178a4bd..762b027 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -69,7 +69,7 @@ static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
&nat_idx_gen, &nat_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
p = to_tcf_nat(pc);
ret = ACT_P_CREATED;
} else {
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 445bef7..50c7c06 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -70,7 +70,7 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
&pedit_idx_gen, &pedit_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
p = to_pedit(pc);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
@@ -127,11 +127,9 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
int i, munged = 0;
unsigned int off;
- if (skb_cloned(skb)) {
- if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
- return p->tcf_action;
- }
- }
+ if (skb_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ return p->tcf_action;
off = skb_network_offset(skb);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index e2f08b1..8a16307 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,8 +22,8 @@
#include <net/act_api.h>
#include <net/netlink.h>
-#define L2T(p,L) qdisc_l2t((p)->tcfp_R_tab, L)
-#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L)
+#define L2T(p, L) qdisc_l2t((p)->tcfp_R_tab, L)
+#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
#define POL_TAB_MASK 15
static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -37,8 +37,7 @@ static struct tcf_hashinfo police_hash_info = {
};
/* old policer structure from before tc actions */
-struct tc_police_compat
-{
+struct tc_police_compat {
u32 index;
int action;
u32 limit;
@@ -139,7 +138,7 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
struct tc_action *a, int ovr, int bind)
{
- unsigned h;
+ unsigned int h;
int ret = 0, err;
struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7287cff..a34a22d 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -47,7 +47,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
/* print policy string followed by _ then packet count
* Example if this was the 3rd packet and the string was "hello"
* then it would look like "hello_3" (without quotes)
- **/
+ */
pr_info("simple: %s_%d\n",
(char *)d->tcfd_defdata, d->tcf_bstats.packets);
spin_unlock(&d->tcf_lock);
@@ -125,7 +125,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
&simp_idx_gen, &simp_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
d = to_defact(pc);
ret = alloc_defdata(d, defdata);
@@ -149,7 +149,7 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
return ret;
}
-static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
+static int tcf_simp_cleanup(struct tc_action *a, int bind)
{
struct tcf_defact *d = a->priv;
@@ -158,8 +158,8 @@ static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
return 0;
}
-static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
- int bind, int ref)
+static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_defact *d = a->priv;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 836f5fe..5f6f0c7 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -113,7 +113,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
&skbedit_idx_gen, &skbedit_hash_info);
if (IS_ERR(pc))
- return PTR_ERR(pc);
+ return PTR_ERR(pc);
d = to_skbedit(pc);
ret = ACT_P_CREATED;
@@ -144,7 +144,7 @@ static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
return ret;
}
-static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
+static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
{
struct tcf_skbedit *d = a->priv;
@@ -153,8 +153,8 @@ static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
return 0;
}
-static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
- int bind, int ref)
+static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbedit *d = a->priv;
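Both the act_simple and act_skbedit hunks fix the indentation of the same error path: tcf_hash_create() follows the kernel's error-pointer convention, returning either a valid pointer or an errno encoded in the pointer value. A hedged sketch of that convention, with hypothetical names (widget_create stands in for tcf_hash_create):

#include <linux/err.h>
#include <linux/slab.h>

struct widget { int id; };

/* Illustrative only: allocate, or fail with an encoded errno. */
static struct widget *widget_create(int id)
{
	struct widget *w;

	if (id < 0)
		return ERR_PTR(-EINVAL);
	w = kzalloc(sizeof(*w), GFP_KERNEL);
	if (!w)
		return ERR_PTR(-ENOMEM);
	w->id = id;
	return w;
}

/* Caller side, mirroring the corrected hunks above. */
static int widget_init(void)
{
	struct widget *w = widget_create(1);

	if (IS_ERR(w))
		return PTR_ERR(w);	/* decode the errno and propagate it */
	kfree(w);
	return 0;
}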
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5fd0c28..bb2c523 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -85,7 +85,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
int rc = -ENOENT;
write_lock(&cls_mod_lock);
- for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
+ for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
if (t == ops)
break;
@@ -111,7 +111,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
u32 first = TC_H_MAKE(0xC0000000U, 0U);
if (tp)
- first = tp->prio-1;
+ first = tp->prio - 1;
return first;
}
@@ -149,7 +149,8 @@ replay:
if (prio == 0) {
/* If no priority is given, the user wants us to allocate it. */
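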
- if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+ if (n->nlmsg_type != RTM_NEWTFILTER ||
+ !(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
prio = TC_H_MAKE(0x80000000U, 0U);
}
@@ -176,7 +177,8 @@ replay:
}
/* Is it classful? */
- if ((cops = q->ops->cl_ops) == NULL)
+ cops = q->ops->cl_ops;
+ if (!cops)
return -EINVAL;
if (cops->tcf_chain == NULL)
@@ -196,10 +198,11 @@ replay:
goto errout;
/* Check the chain for existence of proto-tcf with this priority */
- for (back = chain; (tp=*back) != NULL; back = &tp->next) {
+ for (back = chain; (tp = *back) != NULL; back = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
- if (!nprio || (tp->protocol != protocol && protocol))
+ if (!nprio ||
+ (tp->protocol != protocol && protocol))
goto errout;
} else
tp = NULL;
@@ -216,7 +219,8 @@ replay:
goto errout;
err = -ENOENT;
- if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+ if (n->nlmsg_type != RTM_NEWTFILTER ||
+ !(n->nlmsg_flags & NLM_F_CREATE))
goto errout;
@@ -420,7 +424,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return skb->len;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return skb->len;
if (!tcm->tcm_parent)
@@ -429,7 +434,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
goto out;
- if ((cops = q->ops->cl_ops) == NULL)
+ cops = q->ops->cl_ops;
+ if (!cops)
goto errout;
if (cops->tcf_chain == NULL)
goto errout;
@@ -444,8 +450,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
- for (tp=*chain, t=0; tp; tp = tp->next, t++) {
- if (t < s_t) continue;
+ for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
+ if (t < s_t)
+ continue;
if (TC_H_MAJ(tcm->tcm_info) &&
TC_H_MAJ(tcm->tcm_info) != tp->prio)
continue;
@@ -468,10 +475,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
arg.skb = skb;
arg.cb = cb;
arg.w.stop = 0;
- arg.w.skip = cb->args[1]-1;
+ arg.w.skip = cb->args[1] - 1;
arg.w.count = 0;
tp->ops->walk(tp, &arg.w);
- cb->args[1] = arg.w.count+1;
+ cb->args[1] = arg.w.count + 1;
if (arg.w.stop)
break;
}
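Several cls_api hunks pull assignments like `(t = *tp)` out of conditions while keeping the underlying pointer-to-pointer list walk, the idiom unregister_tcf_proto_ops() and the filter-chain search both rely on: walking `struct foo **` lets the code unlink a node without tracking a separate `prev`. A self-contained restatement of that idiom, with a hypothetical node type:

#include <linux/errno.h>

struct node { struct node *next; int key; };

/* Sketch of the double-pointer walk used by the unregister paths above. */
static int list_remove(struct node **head, int key)
{
	struct node **pp, *p;

	for (pp = head; (p = *pp) != NULL; pp = &p->next) {
		if (p->key == key) {
			*pp = p->next;	/* unlink without a 'prev' pointer */
			return 0;
		}
	}
	return -ENOENT;
}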
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index f23d915..8be8872 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -21,14 +21,12 @@
#include <net/act_api.h>
#include <net/pkt_cls.h>
-struct basic_head
-{
+struct basic_head {
u32 hgenerator;
struct list_head flist;
};
-struct basic_filter
-{
+struct basic_filter {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
@@ -92,8 +90,7 @@ static int basic_init(struct tcf_proto *tp)
return 0;
}
-static inline void basic_delete_filter(struct tcf_proto *tp,
- struct basic_filter *f)
+static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
{
tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts);
@@ -135,9 +132,9 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
[TCA_BASIC_EMATCHES] = { .type = NLA_NESTED },
};
-static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
- unsigned long base, struct nlattr **tb,
- struct nlattr *est)
+static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
+ unsigned long base, struct nlattr **tb,
+ struct nlattr *est)
{
int err = -EINVAL;
struct tcf_exts e;
@@ -203,7 +200,7 @@ static int basic_change(struct tcf_proto *tp, unsigned long base, u32 handle,
} while (--i > 0 && basic_get(tp, head->hgenerator));
if (i <= 0) {
- printk(KERN_ERR "Insufficient number of handles\n");
+ pr_err("Insufficient number of handles\n");
goto errout;
}
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d49c40f..32a3351 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -56,7 +56,8 @@ static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
{
struct cgroup_cls_state *cs;
- if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL)))
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
return ERR_PTR(-ENOMEM);
if (cgrp->parent)
@@ -94,8 +95,7 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}
-struct cls_cgroup_head
-{
+struct cls_cgroup_head {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
@@ -166,7 +166,7 @@ static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg)
{
- struct nlattr *tb[TCA_CGROUP_MAX+1];
+ struct nlattr *tb[TCA_CGROUP_MAX + 1];
struct cls_cgroup_head *head = tp->root;
struct tcf_ematch_tree t;
struct tcf_exts e;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 5b271a1..8ec0139 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
@@ -276,7 +276,7 @@ fallback:
static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
if (skb_dst(skb))
return skb_dst(skb)->tclassid;
#endif
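The cls_flow hunks only add spaces inside `htons(IP_MF | IP_OFFSET)`, but the test itself is worth spelling out: transport ports can only be read from the first fragment of an IPv4 datagram, so any packet with MF set or a nonzero fragment offset is skipped. A sketch under that reading (the helper name is hypothetical; at the time of this patch no common ip_is_fragment() helper is assumed to exist):

#include <linux/ip.h>
#include <linux/types.h>

/* Sketch: true when iph belongs to a fragment whose transport header
 * is unavailable (more fragments follow, or the offset is nonzero). */
static bool ip_frag_no_ports(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}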
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 93b0a7b..26e7bc4 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -31,14 +31,12 @@
#define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
-struct fw_head
-{
+struct fw_head {
struct fw_filter *ht[HTSIZE];
u32 mask;
};
-struct fw_filter
-{
+struct fw_filter {
struct fw_filter *next;
u32 id;
struct tcf_result res;
@@ -53,7 +51,7 @@ static const struct tcf_ext_map fw_ext_map = {
.police = TCA_FW_POLICE
};
-static __inline__ int fw_hash(u32 handle)
+static inline int fw_hash(u32 handle)
{
if (HTSIZE == 4096)
return ((handle >> 24) & 0xFFF) ^
@@ -82,14 +80,14 @@ static __inline__ int fw_hash(u32 handle)
static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f;
int r;
u32 id = skb->mark;
if (head != NULL) {
id &= head->mask;
- for (f=head->ht[fw_hash(id)]; f; f=f->next) {
+ for (f = head->ht[fw_hash(id)]; f; f = f->next) {
if (f->id == id) {
*res = f->res;
#ifdef CONFIG_NET_CLS_IND
@@ -105,7 +103,8 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
}
} else {
/* old method */
- if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) {
+ if (id && (TC_H_MAJ(id) == 0 ||
+ !(TC_H_MAJ(id ^ tp->q->handle)))) {
res->classid = id;
res->class = 0;
return 0;
@@ -117,13 +116,13 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f;
if (head == NULL)
return 0;
- for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
+ for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
if (f->id == handle)
return (unsigned long)f;
}
@@ -139,8 +138,7 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
-static inline void
-fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
+static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
{
tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts);
@@ -156,8 +154,8 @@ static void fw_destroy(struct tcf_proto *tp)
if (head == NULL)
return;
- for (h=0; h<HTSIZE; h++) {
- while ((f=head->ht[h]) != NULL) {
+ for (h = 0; h < HTSIZE; h++) {
+ while ((f = head->ht[h]) != NULL) {
head->ht[h] = f->next;
fw_delete_filter(tp, f);
}
@@ -167,14 +165,14 @@ static void fw_destroy(struct tcf_proto *tp)
static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct fw_head *head = (struct fw_head*)tp->root;
- struct fw_filter *f = (struct fw_filter*)arg;
+ struct fw_head *head = (struct fw_head *)tp->root;
+ struct fw_filter *f = (struct fw_filter *)arg;
struct fw_filter **fp;
if (head == NULL || f == NULL)
goto out;
- for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
+ for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
@@ -240,7 +238,7 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
struct nlattr **tca,
unsigned long *arg)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter *) *arg;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_FW_MAX + 1];
@@ -302,7 +300,7 @@ errout:
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct fw_head *head = (struct fw_head*)tp->root;
+ struct fw_head *head = (struct fw_head *)tp->root;
int h;
if (head == NULL)
@@ -332,7 +330,7 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct fw_head *head = (struct fw_head *)tp->root;
- struct fw_filter *f = (struct fw_filter*)fh;
+ struct fw_filter *f = (struct fw_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
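cls_fw folds the 32-bit skb->mark into an HTSIZE-entry table; HTSIZE depends on PAGE_SIZE, and for the 256-entry case handled by fw_hash() above the fold is simply an XOR of the four bytes. A userspace restatement of that case, for illustration only:

#include <stdint.h>

/* XOR all four bytes of the mark together, as fw_hash() does when
 * HTSIZE == 256; other table sizes use different foldings. */
static unsigned int fw_hash_256(uint32_t handle)
{
	return ((handle >> 24) ^ (handle >> 16) ^
		(handle >> 8) ^ handle) & 0xFF;
}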
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 694dcd8..d580cdf 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -23,34 +23,30 @@
#include <net/pkt_cls.h>
/*
- 1. For now we assume that route tags < 256.
- It allows to use direct table lookups, instead of hash tables.
- 2. For now we assume that "from TAG" and "fromdev DEV" statements
- are mutually exclusive.
- 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
+ * 1. For now we assume that route tags < 256.
+ * It allows to use direct table lookups, instead of hash tables.
+ * 2. For now we assume that "from TAG" and "fromdev DEV" statements
+ * are mutually exclusive.
+ * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
*/
-struct route4_fastmap
-{
+struct route4_fastmap {
struct route4_filter *filter;
u32 id;
int iif;
};
-struct route4_head
-{
+struct route4_head {
struct route4_fastmap fastmap[16];
- struct route4_bucket *table[256+1];
+ struct route4_bucket *table[256 + 1];
};
-struct route4_bucket
-{
+struct route4_bucket {
/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
- struct route4_filter *ht[16+16+1];
+ struct route4_filter *ht[16 + 16 + 1];
};
-struct route4_filter
-{
+struct route4_filter {
struct route4_filter *next;
u32 id;
int iif;
@@ -61,20 +57,20 @@ struct route4_filter
struct route4_bucket *bkt;
};
-#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
+#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
static const struct tcf_ext_map route_ext_map = {
.police = TCA_ROUTE4_POLICE,
.action = TCA_ROUTE4_ACT
};
-static __inline__ int route4_fastmap_hash(u32 id, int iif)
+static inline int route4_fastmap_hash(u32 id, int iif)
{
- return id&0xF;
+ return id & 0xF;
}
-static inline
-void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
+static void
+route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
@@ -83,32 +79,33 @@ void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
spin_unlock_bh(root_lock);
}
-static inline void
+static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
struct route4_filter *f)
{
int h = route4_fastmap_hash(id, iif);
+
head->fastmap[h].id = id;
head->fastmap[h].iif = iif;
head->fastmap[h].filter = f;
}
-static __inline__ int route4_hash_to(u32 id)
+static inline int route4_hash_to(u32 id)
{
- return id&0xFF;
+ return id & 0xFF;
}
-static __inline__ int route4_hash_from(u32 id)
+static inline int route4_hash_from(u32 id)
{
- return (id>>16)&0xF;
+ return (id >> 16) & 0xF;
}
-static __inline__ int route4_hash_iif(int iif)
+static inline int route4_hash_iif(int iif)
{
- return 16 + ((iif>>16)&0xF);
+ return 16 + ((iif >> 16) & 0xF);
}
-static __inline__ int route4_hash_wild(void)
+static inline int route4_hash_wild(void)
{
return 32;
}
@@ -131,21 +128,22 @@ static __inline__ int route4_hash_wild(void)
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- struct route4_head *head = (struct route4_head*)tp->root;
+ struct route4_head *head = (struct route4_head *)tp->root;
struct dst_entry *dst;
struct route4_bucket *b;
struct route4_filter *f;
u32 id, h;
int iif, dont_cache = 0;
- if ((dst = skb_dst(skb)) == NULL)
+ dst = skb_dst(skb);
+ if (!dst)
goto failure;
id = dst->tclassid;
if (head == NULL)
goto old_method;
- iif = ((struct rtable*)dst)->fl.iif;
+ iif = ((struct rtable *)dst)->fl.iif;
h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id &&
@@ -161,7 +159,8 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
h = route4_hash_to(id);
restart:
- if ((b = head->table[h]) != NULL) {
+ b = head->table[h];
+ if (b) {
for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
if (f->id == id)
ROUTE4_APPLY_RESULT();
@@ -197,8 +196,9 @@ old_method:
static inline u32 to_hash(u32 id)
{
- u32 h = id&0xFF;
- if (id&0x8000)
+ u32 h = id & 0xFF;
+
+ if (id & 0x8000)
h += 256;
return h;
}
@@ -211,17 +211,17 @@ static inline u32 from_hash(u32 id)
if (!(id & 0x8000)) {
if (id > 255)
return 256;
- return id&0xF;
+ return id & 0xF;
}
- return 16 + (id&0xF);
+ return 16 + (id & 0xF);
}
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
- struct route4_head *head = (struct route4_head*)tp->root;
+ struct route4_head *head = (struct route4_head *)tp->root;
struct route4_bucket *b;
struct route4_filter *f;
- unsigned h1, h2;
+ unsigned int h1, h2;
if (!head)
return 0;
@@ -230,11 +230,12 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
if (h1 > 256)
return 0;
- h2 = from_hash(handle>>16);
+ h2 = from_hash(handle >> 16);
if (h2 > 32)
return 0;
- if ((b = head->table[h1]) != NULL) {
+ b = head->table[h1];
+ if (b) {
for (f = b->ht[h2]; f; f = f->next)
if (f->handle == handle)
return (unsigned long)f;
@@ -251,7 +252,7 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
-static inline void
+static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
tcf_unbind_filter(tp, &f->res);
@@ -267,11 +268,12 @@ static void route4_destroy(struct tcf_proto *tp)
if (head == NULL)
return;
- for (h1=0; h1<=256; h1++) {
+ for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b;
- if ((b = head->table[h1]) != NULL) {
- for (h2=0; h2<=32; h2++) {
+ b = head->table[h1];
+ if (b) {
+ for (h2 = 0; h2 <= 32; h2++) {
struct route4_filter *f;
while ((f = b->ht[h2]) != NULL) {
@@ -287,9 +289,9 @@ static void route4_destroy(struct tcf_proto *tp)
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct route4_head *head = (struct route4_head*)tp->root;
- struct route4_filter **fp, *f = (struct route4_filter*)arg;
- unsigned h = 0;
+ struct route4_head *head = (struct route4_head *)tp->root;
+ struct route4_filter **fp, *f = (struct route4_filter *)arg;
+ unsigned int h = 0;
struct route4_bucket *b;
int i;
@@ -299,7 +301,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
h = f->handle;
b = f->bkt;
- for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
+ for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
@@ -310,7 +312,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */
- for (i=0; i<=32; i++)
+ for (i = 0; i <= 32; i++)
if (b->ht[i])
return 0;
@@ -380,7 +382,8 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
}
h1 = to_hash(nhandle);
- if ((b = head->table[h1]) == NULL) {
+ b = head->table[h1];
+ if (!b) {
err = -ENOBUFS;
b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL)
@@ -391,6 +394,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
tcf_tree_unlock(tp);
} else {
unsigned int h2 = from_hash(nhandle >> 16);
+
err = -EEXIST;
for (fp = b->ht[h2]; fp; fp = fp->next)
if (fp->handle == f->handle)
@@ -444,7 +448,8 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
- if ((f = (struct route4_filter*)*arg) != NULL) {
+ f = (struct route4_filter *)*arg;
+ if (f) {
if (f->handle != handle && handle)
return -EINVAL;
@@ -481,7 +486,7 @@ static int route4_change(struct tcf_proto *tp, unsigned long base,
reinsert:
h = from_hash(f->handle >> 16);
- for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
+ for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
if (f->handle < f1->handle)
break;
@@ -492,7 +497,8 @@ reinsert:
if (old_handle && f->handle != old_handle) {
th = to_hash(old_handle);
h = from_hash(old_handle >> 16);
- if ((b = head->table[th]) != NULL) {
+ b = head->table[th];
+ if (b) {
for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
*fp = f->next;
@@ -515,7 +521,7 @@ errout:
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct route4_head *head = tp->root;
- unsigned h, h1;
+ unsigned int h, h1;
if (head == NULL)
arg->stop = 1;
@@ -549,7 +555,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct route4_filter *f = (struct route4_filter*)fh;
+ struct route4_filter *f = (struct route4_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
u32 id;
@@ -563,15 +569,15 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
if (nest == NULL)
goto nla_put_failure;
- if (!(f->handle&0x8000)) {
- id = f->id&0xFF;
+ if (!(f->handle & 0x8000)) {
+ id = f->id & 0xFF;
NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
}
- if (f->handle&0x80000000) {
- if ((f->handle>>16) != 0xFFFF)
+ if (f->handle & 0x80000000) {
+ if ((f->handle >> 16) != 0xFFFF)
NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
} else {
- id = f->id>>16;
+ id = f->id >> 16;
NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
}
if (f->res.classid)
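The route4_dump() hunk above makes the handle layout easier to read: the TO id lives in the low byte, bit 15 marks that a FROM/IIF part is present, bits 16..19 carry the FROM nibble, and the top bit selects the IIF interpretation. A decoding sketch inferred from that hunk (not an API from the patch, and only approximate):

#include <stdint.h>

/* Rough decode of a route4 handle, as suggested by route4_dump(). */
static void route4_decode(uint32_t handle,
			  unsigned int *to, unsigned int *from,
			  int *has_from, int *iif_based)
{
	*to = handle & 0xFF;
	*has_from = (handle & 0x8000) != 0;
	*from = (handle >> 16) & 0xF;
	*iif_based = (handle & 0x80000000) != 0;
}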
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 425a179..402c44b 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -66,28 +66,25 @@
powerful classification engine. */
-struct rsvp_head
-{
+struct rsvp_head {
u32 tmap[256/32];
u32 hgenerator;
u8 tgenerator;
struct rsvp_session *ht[256];
};
-struct rsvp_session
-{
+struct rsvp_session {
struct rsvp_session *next;
__be32 dst[RSVP_DST_LEN];
struct tc_rsvp_gpi dpi;
u8 protocol;
u8 tunnelid;
/* 16 (src,sport) hash slots, and one wildcard source slot */
- struct rsvp_filter *ht[16+1];
+ struct rsvp_filter *ht[16 + 1];
};
-struct rsvp_filter
-{
+struct rsvp_filter {
struct rsvp_filter *next;
__be32 src[RSVP_DST_LEN];
struct tc_rsvp_gpi spi;
@@ -100,17 +97,19 @@ struct rsvp_filter
struct rsvp_session *sess;
};
-static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
+static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
{
- unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
+ unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
+
h ^= h>>16;
h ^= h>>8;
return (h ^ protocol ^ tunnelid) & 0xFF;
}
-static __inline__ unsigned hash_src(__be32 *src)
+static inline unsigned int hash_src(__be32 *src)
{
- unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
+ unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
+
h ^= h>>16;
h ^= h>>8;
h ^= h>>4;
@@ -134,10 +133,10 @@ static struct tcf_ext_map rsvp_ext_map = {
static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
- struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+ struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s;
struct rsvp_filter *f;
- unsigned h1, h2;
+ unsigned int h1, h2;
__be32 *dst, *src;
u8 protocol;
u8 tunnelid = 0;
@@ -162,13 +161,13 @@ restart:
src = &nhptr->saddr.s6_addr32[0];
dst = &nhptr->daddr.s6_addr32[0];
protocol = nhptr->nexthdr;
- xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr);
+ xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
#else
src = &nhptr->saddr;
dst = &nhptr->daddr;
protocol = nhptr->protocol;
- xprt = ((u8*)nhptr) + (nhptr->ihl<<2);
- if (nhptr->frag_off & htons(IP_MF|IP_OFFSET))
+ xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
+ if (nhptr->frag_off & htons(IP_MF | IP_OFFSET))
return -1;
#endif
@@ -176,10 +175,10 @@ restart:
h2 = hash_src(src);
for (s = sht[h1]; s; s = s->next) {
- if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
+ if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
protocol == s->protocol &&
!(s->dpi.mask &
- (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
+ (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
#if RSVP_DST_LEN == 4
dst[0] == s->dst[0] &&
dst[1] == s->dst[1] &&
@@ -188,8 +187,8 @@ restart:
tunnelid == s->tunnelid) {
for (f = s->ht[h2]; f; f = f->next) {
- if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
- !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
+ if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
+ !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
&&
src[0] == f->src[0] &&
@@ -205,7 +204,7 @@ matched:
return 0;
tunnelid = f->res.classid;
- nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
+ nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
goto restart;
}
}
@@ -224,11 +223,11 @@ matched:
static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
{
- struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+ struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
struct rsvp_session *s;
struct rsvp_filter *f;
- unsigned h1 = handle&0xFF;
- unsigned h2 = (handle>>8)&0xFF;
+ unsigned int h1 = handle & 0xFF;
+ unsigned int h2 = (handle >> 8) & 0xFF;
if (h2 > 16)
return 0;
@@ -258,7 +257,7 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS;
}
-static inline void
+static void
rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
tcf_unbind_filter(tp, &f->res);
@@ -277,13 +276,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
sht = data->ht;
- for (h1=0; h1<256; h1++) {
+ for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
while ((s = sht[h1]) != NULL) {
sht[h1] = s->next;
- for (h2=0; h2<=16; h2++) {
+ for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
while ((f = s->ht[h2]) != NULL) {
@@ -299,13 +298,13 @@ static void rsvp_destroy(struct tcf_proto *tp)
static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg;
- unsigned h = f->handle;
+ struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
+ unsigned int h = f->handle;
struct rsvp_session **sp;
struct rsvp_session *s = f->sess;
int i;
- for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) {
+ for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
@@ -314,12 +313,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
/* Strip tree */
- for (i=0; i<=16; i++)
+ for (i = 0; i <= 16; i++)
if (s->ht[i])
return 0;
/* OK, session has no flows */
- for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
+ for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
*sp; sp = &(*sp)->next) {
if (*sp == s) {
tcf_tree_lock(tp);
@@ -337,13 +336,14 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
return 0;
}
-static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
+static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
struct rsvp_head *data = tp->root;
int i = 0xFFFF;
while (i-- > 0) {
u32 h;
+
if ((data->hgenerator += 0x10000) == 0)
data->hgenerator = 0x10000;
h = data->hgenerator|salt;
@@ -355,10 +355,10 @@ static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
static int tunnel_bts(struct rsvp_head *data)
{
- int n = data->tgenerator>>5;
- u32 b = 1<<(data->tgenerator&0x1F);
+ int n = data->tgenerator >> 5;
+ u32 b = 1 << (data->tgenerator & 0x1F);
- if (data->tmap[n]&b)
+ if (data->tmap[n] & b)
return 0;
data->tmap[n] |= b;
return 1;
@@ -372,10 +372,10 @@ static void tunnel_recycle(struct rsvp_head *data)
memset(tmap, 0, sizeof(tmap));
- for (h1=0; h1<256; h1++) {
+ for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
for (s = sht[h1]; s; s = s->next) {
- for (h2=0; h2<=16; h2++) {
+ for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
for (f = s->ht[h2]; f; f = f->next) {
@@ -395,8 +395,8 @@ static u32 gen_tunnel(struct rsvp_head *data)
{
int i, k;
- for (k=0; k<2; k++) {
- for (i=255; i>0; i--) {
+ for (k = 0; k < 2; k++) {
+ for (i = 255; i > 0; i--) {
if (++data->tgenerator == 0)
data->tgenerator = 1;
if (tunnel_bts(data))
@@ -428,7 +428,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
struct nlattr *opt = tca[TCA_OPTIONS-1];
struct nlattr *tb[TCA_RSVP_MAX + 1];
struct tcf_exts e;
- unsigned h1, h2;
+ unsigned int h1, h2;
__be32 *dst;
int err;
@@ -443,7 +443,8 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
- if ((f = (struct rsvp_filter*)*arg) != NULL) {
+ f = (struct rsvp_filter *)*arg;
+ if (f) {
/* Node exists: adjust only classid */
if (f->handle != handle && handle)
@@ -500,7 +501,7 @@ static int rsvp_change(struct tcf_proto *tp, unsigned long base,
goto errout;
}
- for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
+ for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
pinfo && pinfo->protocol == s->protocol &&
memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
@@ -523,7 +524,7 @@ insert:
tcf_exts_change(tp, &f->exts, &e);
for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
- if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
+ if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
break;
f->next = *fp;
wmb();
@@ -567,7 +568,7 @@ errout2:
static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct rsvp_head *head = tp->root;
- unsigned h, h1;
+ unsigned int h, h1;
if (arg->stop)
return;
@@ -598,7 +599,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct rsvp_filter *f = (struct rsvp_filter*)fh;
+ struct rsvp_filter *f = (struct rsvp_filter *)fh;
struct rsvp_session *s;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
@@ -624,7 +625,7 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
if (f->res.classid)
NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
- if (((f->handle>>8)&0xFF) != 16)
+ if (((f->handle >> 8) & 0xFF) != 16)
NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
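hash_dst() above folds the last 32 bits of the destination address down to 8 bits by repeated shift-and-XOR, then mixes in the protocol and tunnel id so sessions spread across the 256 buckets. A userspace restatement of exactly that computation:

#include <stdint.h>

/* Fold a 32-bit word to 8 bits, then mix in protocol and tunnel id,
 * matching hash_dst() in the hunk above. */
static unsigned int rsvp_hash_dst(uint32_t dst_last,
				  uint8_t protocol, uint8_t tunnelid)
{
	uint32_t h = dst_last;

	h ^= h >> 16;
	h ^= h >> 8;
	return (h ^ protocol ^ tunnelid) & 0xFF;
}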
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 20ef330..36667fa 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -249,7 +249,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
* of the hashing index is below the threshold.
*/
if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
- cp.hash = (cp.mask >> cp.shift)+1;
+ cp.hash = (cp.mask >> cp.shift) + 1;
else
cp.hash = DEFAULT_HASH_SIZE;
}
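The tcindex hunk only adds spacing, but the sizing rule it touches is neat: when the masked-and-shifted index space is small enough, the classifier allocates a perfect hash with one slot per possible value, `(mask >> shift) + 1` entries. A sketch of that rule with the thresholds passed in explicitly (parameter names are illustrative):

/* With mask 0x00F0 and shift 4 the index space is 0..15, so 16
 * perfect-hash slots cover every value; otherwise fall back. */
static unsigned int tcindex_hash_size(unsigned int mask, unsigned int shift,
				      unsigned int threshold,
				      unsigned int default_size)
{
	if ((mask >> shift) < threshold)
		return (mask >> shift) + 1;
	return default_size;
}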
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index b0c2a82..966920c 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -42,8 +42,7 @@
#include <net/act_api.h>
#include <net/pkt_cls.h>
-struct tc_u_knode
-{
+struct tc_u_knode {
struct tc_u_knode *next;
u32 handle;
struct tc_u_hnode *ht_up;
@@ -63,19 +62,17 @@ struct tc_u_knode
struct tc_u32_sel sel;
};
-struct tc_u_hnode
-{
+struct tc_u_hnode {
struct tc_u_hnode *next;
u32 handle;
u32 prio;
struct tc_u_common *tp_c;
int refcnt;
- unsigned divisor;
+ unsigned int divisor;
struct tc_u_knode *ht[1];
};
-struct tc_u_common
-{
+struct tc_u_common {
struct tc_u_hnode *hlist;
struct Qdisc *q;
int refcnt;
@@ -87,9 +84,11 @@ static const struct tcf_ext_map u32_ext_map = {
.police = TCA_U32_POLICE
};
-static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
+static inline unsigned int u32_hash_fold(__be32 key,
+ const struct tc_u32_sel *sel,
+ u8 fshift)
{
- unsigned h = ntohl(key & sel->hmask)>>fshift;
+ unsigned int h = ntohl(key & sel->hmask) >> fshift;
return h;
}
@@ -101,7 +100,7 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
unsigned int off;
} stack[TC_U32_MAXDEPTH];
- struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
+ struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n;
int sdepth = 0;
@@ -120,7 +119,7 @@ next_knode:
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rcnt +=1;
+ n->pf->rcnt += 1;
j = 0;
#endif
@@ -133,7 +132,7 @@ next_knode:
}
#endif
- for (i = n->sel.nkeys; i>0; i--, key++) {
+ for (i = n->sel.nkeys; i > 0; i--, key++) {
int toff = off + key->off + (off2 & key->offmask);
__be32 *data, _data;
@@ -148,13 +147,13 @@ next_knode:
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
- n->pf->kcnts[j] +=1;
+ n->pf->kcnts[j] += 1;
j++;
#endif
}
if (n->ht_down == NULL) {
check_terminal:
- if (n->sel.flags&TC_U32_TERMINAL) {
+ if (n->sel.flags & TC_U32_TERMINAL) {
*res = n->res;
#ifdef CONFIG_NET_CLS_IND
@@ -164,7 +163,7 @@ check_terminal:
}
#endif
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rhit +=1;
+ n->pf->rhit += 1;
#endif
r = tcf_exts_exec(skb, &n->exts, res);
if (r < 0) {
@@ -197,10 +196,10 @@ check_terminal:
sel = ht->divisor & u32_hash_fold(*data, &n->sel,
n->fshift);
}
- if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
+ if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
goto next_ht;
- if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
+ if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
off2 = n->sel.off + 3;
if (n->sel.flags & TC_U32_VAROFFSET) {
__be16 *data, _data;
@@ -215,7 +214,7 @@ check_terminal:
}
off2 &= ~3;
}
- if (n->sel.flags&TC_U32_EAT) {
+ if (n->sel.flags & TC_U32_EAT) {
off += off2;
off2 = 0;
}
@@ -236,11 +235,11 @@ out:
deadloop:
if (net_ratelimit())
- printk(KERN_WARNING "cls_u32: dead loop\n");
+ pr_warning("cls_u32: dead loop\n");
return -1;
}
-static __inline__ struct tc_u_hnode *
+static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
struct tc_u_hnode *ht;
@@ -252,10 +251,10 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
return ht;
}
-static __inline__ struct tc_u_knode *
+static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
- unsigned sel;
+ unsigned int sel;
struct tc_u_knode *n = NULL;
sel = TC_U32_HASH(handle);
@@ -300,7 +299,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c)
do {
if (++tp_c->hgenerator == 0x7FF)
tp_c->hgenerator = 1;
- } while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
+ } while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}
@@ -378,9 +377,9 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
struct tc_u_knode *n;
- unsigned h;
+ unsigned int h;
- for (h=0; h<=ht->divisor; h++) {
+ for (h = 0; h <= ht->divisor; h++) {
while ((n = ht->ht[h]) != NULL) {
ht->ht[h] = n->next;
@@ -446,13 +445,13 @@ static void u32_destroy(struct tcf_proto *tp)
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;
+ struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
if (ht == NULL)
return 0;
if (TC_U32_KEY(ht->handle))
- return u32_delete_key(tp, (struct tc_u_knode*)ht);
+ return u32_delete_key(tp, (struct tc_u_knode *)ht);
if (tp->root == ht)
return -EINVAL;
@@ -470,14 +469,14 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
struct tc_u_knode *n;
- unsigned i = 0x7FF;
+ unsigned int i = 0x7FF;
- for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
+ for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
if (i < TC_U32_NODE(n->handle))
i = TC_U32_NODE(n->handle);
i++;
- return handle|(i>0xFFF ? 0xFFF : i);
+ return handle | (i > 0xFFF ? 0xFFF : i);
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
@@ -566,7 +565,8 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (err < 0)
return err;
- if ((n = (struct tc_u_knode*)*arg) != NULL) {
+ n = (struct tc_u_knode *)*arg;
+ if (n) {
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
@@ -574,7 +574,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
}
if (tb[TCA_U32_DIVISOR]) {
- unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
+ unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
if (--divisor > 0x100)
return -EINVAL;
@@ -585,7 +585,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
if (handle == 0)
return -ENOMEM;
}
- ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
+ ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
if (ht == NULL)
return -ENOBUFS;
ht->tp_c = tp_c;
@@ -683,7 +683,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
struct tc_u_common *tp_c = tp->data;
struct tc_u_hnode *ht;
struct tc_u_knode *n;
- unsigned h;
+ unsigned int h;
if (arg->stop)
return;
@@ -717,7 +717,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct tc_u_knode *n = (struct tc_u_knode*)fh;
+ struct tc_u_knode *n = (struct tc_u_knode *)fh;
struct nlattr *nest;
if (n == NULL)
@@ -730,8 +730,9 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
if (TC_U32_KEY(n->handle) == 0) {
- struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
- u32 divisor = ht->divisor+1;
+ struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
+ u32 divisor = ht->divisor + 1;
+
NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
} else {
NLA_PUT(skb, TCA_U32_SEL,
@@ -755,7 +756,7 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
- if(strlen(n->indev))
+ if (strlen(n->indev))
NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
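u32_hash_fold() above masks the key while it is still in network byte order, converts the result to host order, and shifts right so the lowest set bit of the mask lands at bit 0; the caller then ANDs with the divisor to pick a bucket. A userspace restatement (fshift is the precomputed bit position of the mask's lowest set bit, as n->fshift is in the kernel):

#include <arpa/inet.h>
#include <stdint.h>

/* Mask in big-endian, fold to host order, align to the mask. */
static unsigned int u32_fold(uint32_t key_be, uint32_t hmask_be,
			     uint8_t fshift)
{
	return ntohl(key_be & hmask_be) >> fshift;
}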
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index bc45039..1c8360a 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -33,40 +33,41 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
return 0;
switch (cmp->align) {
- case TCF_EM_ALIGN_U8:
- val = *ptr;
- break;
+ case TCF_EM_ALIGN_U8:
+ val = *ptr;
+ break;
- case TCF_EM_ALIGN_U16:
- val = get_unaligned_be16(ptr);
+ case TCF_EM_ALIGN_U16:
+ val = get_unaligned_be16(ptr);
- if (cmp_needs_transformation(cmp))
- val = be16_to_cpu(val);
- break;
+ if (cmp_needs_transformation(cmp))
+ val = be16_to_cpu(val);
+ break;
- case TCF_EM_ALIGN_U32:
- /* Worth checking boundries? The branching seems
- * to get worse. Visit again. */
- val = get_unaligned_be32(ptr);
+ case TCF_EM_ALIGN_U32:
+ /* Worth checking boundaries? The branching seems
+ * to get worse. Visit again.
+ */
+ val = get_unaligned_be32(ptr);
- if (cmp_needs_transformation(cmp))
- val = be32_to_cpu(val);
- break;
+ if (cmp_needs_transformation(cmp))
+ val = be32_to_cpu(val);
+ break;
- default:
- return 0;
+ default:
+ return 0;
}
if (cmp->mask)
val &= cmp->mask;
switch (cmp->opnd) {
- case TCF_EM_OPND_EQ:
- return val == cmp->val;
- case TCF_EM_OPND_LT:
- return val < cmp->val;
- case TCF_EM_OPND_GT:
- return val > cmp->val;
+ case TCF_EM_OPND_EQ:
+ return val == cmp->val;
+ case TCF_EM_OPND_LT:
+ return val < cmp->val;
+ case TCF_EM_OPND_GT:
+ return val > cmp->val;
}
return 0;
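The re-indented switch in em_cmp dispatches on the configured load width, using the unaligned helpers because the value can sit at any packet offset. A simplified sketch of that dispatch in kernel C (plain widths stand in for the TCF_EM_ALIGN_* constants; get_unaligned_be16/32 are the real helpers from <asm/unaligned.h>):

#include <asm/unaligned.h>
#include <linux/types.h>

/* Load a 1-, 2- or 4-byte big-endian value from an arbitrary offset. */
static u32 load_value(const void *ptr, int width)
{
	switch (width) {
	case 1:
		return *(const u8 *)ptr;
	case 2:
		return get_unaligned_be16(ptr);
	case 4:
		return get_unaligned_be32(ptr);
	default:
		return 0;	/* unknown width: treat as no match */
	}
}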
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 34da5e2..a889d09 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -73,21 +73,18 @@
#include <net/pkt_cls.h>
#include <net/sock.h>
-struct meta_obj
-{
+struct meta_obj {
unsigned long value;
unsigned int len;
};
-struct meta_value
-{
+struct meta_value {
struct tcf_meta_val hdr;
unsigned long val;
unsigned int len;
};
-struct meta_match
-{
+struct meta_match {
struct meta_value lvalue;
struct meta_value rvalue;
};
@@ -255,7 +252,7 @@ META_COLLECTOR(int_rtclassid)
if (unlikely(skb_dst(skb) == NULL))
*err = -1;
else
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
dst->value = skb_dst(skb)->tclassid;
#else
dst->value = 0;
@@ -483,8 +480,7 @@ META_COLLECTOR(int_sk_write_pend)
* Meta value collectors assignment table
**************************************************************************/
-struct meta_ops
-{
+struct meta_ops {
void (*get)(struct sk_buff *, struct tcf_pkt_info *,
struct meta_value *, struct meta_obj *, int *);
};
@@ -494,7 +490,7 @@ struct meta_ops
/* Meta value operations table listing all meta value collectors and
* assigns them to a type and meta id. */
-static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
+static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
[META_ID(DEV)] = META_FUNC(var_dev),
[META_ID(SK_BOUND_IF)] = META_FUNC(var_sk_bound_if),
@@ -550,7 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
}
};
-static inline struct meta_ops * meta_ops(struct meta_value *val)
+static inline struct meta_ops *meta_ops(struct meta_value *val)
{
return &__meta_ops[meta_type(val)][meta_id(val)];
}
@@ -649,9 +645,8 @@ static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
if (v->len == sizeof(unsigned long))
NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
- else if (v->len == sizeof(u32)) {
+ else if (v->len == sizeof(u32))
NLA_PUT_U32(skb, tlv, v->val);
- }
return 0;
@@ -663,8 +658,7 @@ nla_put_failure:
* Type specific operations table
**************************************************************************/
-struct meta_type_ops
-{
+struct meta_type_ops {
void (*destroy)(struct meta_value *);
int (*compare)(struct meta_obj *, struct meta_obj *);
int (*change)(struct meta_value *, struct nlattr *);
@@ -672,7 +666,7 @@ struct meta_type_ops
int (*dump)(struct sk_buff *, struct meta_value *, int);
};
-static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
+static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
[TCF_META_TYPE_VAR] = {
.destroy = meta_var_destroy,
.compare = meta_var_compare,
@@ -688,7 +682,7 @@ static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
}
};
-static inline struct meta_type_ops * meta_type_ops(struct meta_value *v)
+static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
return &__meta_type_ops[meta_type(v)];
}
@@ -713,7 +707,7 @@ static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
return err;
if (meta_type_ops(v)->apply_extras)
- meta_type_ops(v)->apply_extras(v, dst);
+ meta_type_ops(v)->apply_extras(v, dst);
return 0;
}
@@ -732,12 +726,12 @@ static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
switch (meta->lvalue.hdr.op) {
- case TCF_EM_OPND_EQ:
- return !r;
- case TCF_EM_OPND_LT:
- return r < 0;
- case TCF_EM_OPND_GT:
- return r > 0;
+ case TCF_EM_OPND_EQ:
+ return !r;
+ case TCF_EM_OPND_LT:
+ return r < 0;
+ case TCF_EM_OPND_GT:
+ return r > 0;
}
return 0;
@@ -771,7 +765,7 @@ static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
static inline int meta_is_supported(struct meta_value *val)
{
- return (!meta_id(val) || meta_ops(val)->get);
+ return !meta_id(val) || meta_ops(val)->get;
}
static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
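em_meta routes every value lookup through the two-dimensional __meta_ops[type][id] table shown above; meta_ops() is nothing more than an array index into function pointers. A self-contained sketch of that dispatch shape, with placeholder types, ids and collector signatures:

/* Two-dimensional function-pointer dispatch, as __meta_ops[][] uses. */
typedef int (*collector_t)(void);

static int get_zero(void) { return 0; }
static int get_one(void)  { return 1; }

static collector_t table[2][2] = {
	[0] = { [0] = get_zero, [1] = get_one },
	[1] = { [0] = get_one,  [1] = get_zero },
};

static collector_t lookup(unsigned int type, unsigned int id)
{
	return table[type][id];	/* caller must range-check both indices */
}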
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 1a4176a..a3bed07 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -18,8 +18,7 @@
#include <linux/tc_ematch/tc_em_nbyte.h>
#include <net/pkt_cls.h>
-struct nbyte_data
-{
+struct nbyte_data {
struct tcf_em_nbyte hdr;
char pattern[0];
};
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index ea8f566..15d353d 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -19,8 +19,7 @@
#include <linux/tc_ematch/tc_em_text.h>
#include <net/pkt_cls.h>
-struct text_match
-{
+struct text_match {
u16 from_offset;
u16 to_offset;
u8 from_layer;
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c
index 953f147..797bdb8 100644
--- a/net/sched/em_u32.c
+++ b/net/sched/em_u32.c
@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
return 0;
- return !(((*(__be32*) ptr) ^ key->val) & key->mask);
+ return !(((*(__be32 *) ptr) ^ key->val) & key->mask);
}
static struct tcf_ematch_ops em_u32_ops = {
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 5e37da9..88d93eb 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -93,7 +93,7 @@
static LIST_HEAD(ematch_ops);
static DEFINE_RWLOCK(ematch_mod_lock);
-static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind)
+static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
{
struct tcf_ematch_ops *e = NULL;
@@ -163,8 +163,8 @@ void tcf_em_unregister(struct tcf_ematch_ops *ops)
}
EXPORT_SYMBOL(tcf_em_unregister);
-static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree,
- int index)
+static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
+ int index)
{
return &tree->matches[index];
}
@@ -184,7 +184,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em_hdr->kind == TCF_EM_CONTAINER) {
/* Special ematch called "container", carries an index
- * referencing an external ematch sequence. */
+ * referencing an external ematch sequence.
+ */
u32 ref;
if (data_len < sizeof(ref))
@@ -195,7 +196,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
goto errout;
/* We do not allow backward jumps to avoid loops and jumps
- * to our own position are of course illegal. */
+ * to our own position are of course illegal.
+ */
if (ref <= idx)
goto errout;
@@ -208,7 +210,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* which automatically releases the reference again, therefore
* the module MUST not be given back under any circumstances
* here. Be aware, the destroy function assumes that the
- * module is held if the ops field is non zero. */
+ * module is held if the ops field is non zero.
+ */
em->ops = tcf_em_lookup(em_hdr->kind);
if (em->ops == NULL) {
@@ -221,7 +224,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em->ops) {
/* We dropped the RTNL mutex in order to
* perform the module load. Tell the caller
- * to replay the request. */
+ * to replay the request.
+ */
module_put(em->ops->owner);
err = -EAGAIN;
}
@@ -230,7 +234,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
}
/* ematch module provides expected length of data, so we
- * can do a basic sanity check. */
+ * can do a basic sanity check.
+ */
if (em->ops->datalen && data_len < em->ops->datalen)
goto errout;
@@ -246,7 +251,8 @@ static int tcf_em_validate(struct tcf_proto *tp,
* TCF_EM_SIMPLE may be specified stating that the
* data only consists of a u32 integer and the module
* does not expect a memory reference but rather
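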
- * the value carried. */
+ * the value carried.
+ */
if (em_hdr->flags & TCF_EM_SIMPLE) {
if (data_len < sizeof(u32))
goto errout;
@@ -334,7 +340,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
* The array of rt attributes is parsed in the order as they are
* provided, their type must be incremental from 1 to n. Even
* if it does not serve any real purpose, a failure of sticking
- * to this policy will result in parsing failure. */
+ * to this policy will result in parsing failure.
+ */
for (idx = 0; nla_ok(rt_match, list_len); idx++) {
err = -EINVAL;
@@ -359,7 +366,8 @@ int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
/* Check if the number of matches provided by userspace actually
* complies with the array of matches. The number was used for
* the validation of references and a mismatch could lead to
- * undefined references during the matching process. */
+ * undefined references during the matching process.
+ */
if (idx != tree_hdr->nmatches) {
err = -EINVAL;
goto errout_abort;
@@ -449,7 +457,7 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
.flags = em->flags
};
- NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr);
+ NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
if (em->ops && em->ops->dump) {
if (em->ops->dump(skb, em) < 0)
@@ -478,6 +486,7 @@ static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
struct tcf_pkt_info *info)
{
int r = em->ops->match(skb, em, info);
+
return tcf_em_is_inverted(em) ? !r : r;
}
@@ -527,8 +536,8 @@ pop_stack:
stack_overflow:
if (net_ratelimit())
- printk(KERN_WARNING "tc ematch: local stack overflow,"
- " increase NET_EMATCH_STACK\n");
+ pr_warning("tc ematch: local stack overflow,"
+ " increase NET_EMATCH_STACK\n");
return -1;
}
EXPORT_SYMBOL(__tcf_em_tree_match);
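The reflowed comments in tcf_em_validate() spell out the loop-freedom rule for container ematches: a container at position idx may only reference a strictly later match, so backward and self references are rejected outright. A sketch of that check in isolation (function and parameter names are illustrative; errno values as in the kernel):

#include <errno.h>

/* Reject backward, self and dangling container references, keeping
 * the ematch tree loop-free, per the comments in the hunks above. */
static int validate_ref(unsigned int idx, unsigned int ref,
			unsigned int nmatches)
{
	if (ref <= idx)
		return -EINVAL;	/* backward or self reference */
	if (ref >= nmatches)
		return -EINVAL;	/* reference past the end of the tree */
	return 0;
}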
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b22ca2d..1507415 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -187,7 +187,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
int err = -ENOENT;
write_lock(&qdisc_mod_lock);
- for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
+ for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
if (q == qops)
break;
if (q) {
@@ -321,7 +321,9 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
if (!tab || --tab->refcnt)
return;
- for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) {
+ for (rtabp = &qdisc_rtab_list;
+ (rtab = *rtabp) != NULL;
+ rtabp = &rtab->next) {
if (rtab == tab) {
*rtabp = rtab->next;
kfree(rtab);
@@ -396,6 +398,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
return stab;
}
+static void stab_kfree_rcu(struct rcu_head *head)
+{
+ kfree(container_of(head, struct qdisc_size_table, rcu));
+}
+
void qdisc_put_stab(struct qdisc_size_table *tab)
{
if (!tab)
@@ -405,7 +412,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
if (--tab->refcnt == 0) {
list_del(&tab->list);
- kfree(tab);
+ call_rcu_bh(&tab->rcu, stab_kfree_rcu);
}
spin_unlock(&qdisc_stab_lock);
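This is the first behavioural change in sch_api.c: size tables are now freed through an RCU-bh grace period rather than kfree()d immediately, because the packet-length fast path dereferences them from softirq context without taking qdisc_stab_lock. The callback recovers the enclosing object from its embedded rcu_head. A minimal restatement of that pattern with shortened names (the real struct is qdisc_size_table):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct stab_like {
	struct rcu_head rcu;	/* must be embedded in the freed object */
	int refcnt;
};

static void stab_like_free_rcu(struct rcu_head *head)
{
	/* Recover the container from the rcu_head, as stab_kfree_rcu()
	 * does above, and release it only after the grace period. */
	kfree(container_of(head, struct stab_like, rcu));
}

static void stab_like_put(struct stab_like *s)
{
	if (--s->refcnt == 0)
		call_rcu_bh(&s->rcu, stab_like_free_rcu);
}

The companion hunks complete the scheme: writers publish a replacement table with rcu_assign_pointer() and read the old one under rtnl_dereference(), so a concurrent softirq reader always sees either the old or the new table, never freed memory.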
@@ -428,7 +435,7 @@ nla_put_failure:
return -1;
}
-void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
+void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
int pkt_len, slot;
@@ -454,14 +461,13 @@ out:
pkt_len = 1;
qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
-EXPORT_SYMBOL(qdisc_calculate_pkt_len);
+EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
{
if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
- printk(KERN_WARNING
- "%s: %s qdisc %X: is non-work-conserving?\n",
- txt, qdisc->ops->id, qdisc->handle >> 16);
+ pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
+ txt, qdisc->ops->id, qdisc->handle >> 16);
qdisc->flags |= TCQ_F_WARN_NONWC;
}
}
@@ -472,7 +478,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
timer);
- wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(wd->qdisc);
__netif_schedule(qdisc_root(wd->qdisc));
return HRTIMER_NORESTART;
@@ -494,7 +500,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
&qdisc_root_sleeping(wd->qdisc)->state))
return;
- wd->qdisc->flags |= TCQ_F_THROTTLED;
+ qdisc_throttled(wd->qdisc);
time = ktime_set(0, 0);
time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
@@ -504,7 +510,7 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule);
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
hrtimer_cancel(&wd->timer);
- wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
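The watchdog hunks replace open-coded flag twiddling (`flags |= TCQ_F_THROTTLED` and its inverse) with qdisc_throttled()/qdisc_unthrottled() accessors. The patch does not show their bodies; a plausible sketch, assuming they are thin wrappers in include/net/sch_generic.h (the _sketch suffix marks these as illustrative, and the real helpers may differ in detail):

#include <net/sch_generic.h>

static inline void qdisc_throttled_sketch(struct Qdisc *qdisc)
{
	qdisc->flags |= TCQ_F_THROTTLED;
}

static inline void qdisc_unthrottled_sketch(struct Qdisc *qdisc)
{
	qdisc->flags &= ~TCQ_F_THROTTLED;
}

Wrapping the flag keeps every throttle transition behind one name, so later changes (tracing, different storage) touch a single helper instead of every call site.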
@@ -625,7 +631,7 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
autohandle = TC_H_MAKE(0x80000000U, 0);
} while (qdisc_lookup(dev, autohandle) && --i > 0);
- return i>0 ? autohandle : 0;
+ return i > 0 ? autohandle : 0;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
@@ -834,7 +840,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
err = PTR_ERR(stab);
goto err_out4;
}
- sch->stab = stab;
+ rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
spinlock_t *root_lock;
@@ -874,7 +880,7 @@ err_out4:
* Any broken qdiscs that would require a ops->reset() here?
* The qdisc was never in action so it shouldn't be necessary.
*/
- qdisc_put_stab(sch->stab);
+ qdisc_put_stab(rtnl_dereference(sch->stab));
if (ops->destroy)
ops->destroy(sch);
goto err_out3;
@@ -882,7 +888,7 @@ err_out4:
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
- struct qdisc_size_table *stab = NULL;
+ struct qdisc_size_table *ostab, *stab = NULL;
int err = 0;
if (tca[TCA_OPTIONS]) {
@@ -899,8 +905,9 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
return PTR_ERR(stab);
}
- qdisc_put_stab(sch->stab);
- sch->stab = stab;
+ ostab = rtnl_dereference(sch->stab);
+ rcu_assign_pointer(sch->stab, stab);
+ qdisc_put_stab(ostab);
if (tca[TCA_RATE]) {
/* NB: ignores errors from replace_estimator
@@ -915,9 +922,8 @@ out:
return 0;
}
-struct check_loop_arg
-{
- struct qdisc_walker w;
+struct check_loop_arg {
+ struct qdisc_walker w;
struct Qdisc *p;
int depth;
};
@@ -970,7 +976,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
struct Qdisc *p = NULL;
int err;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -980,12 +987,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (clid) {
if (clid != TC_H_ROOT) {
if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+ p = qdisc_lookup(dev, TC_H_MAJ(clid));
+ if (!p)
return -ENOENT;
q = qdisc_leaf(p, clid);
- } else { /* ingress */
- if (dev_ingress_queue(dev))
- q = dev_ingress_queue(dev)->qdisc_sleeping;
+ } else if (dev_ingress_queue(dev)) {
+ q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -996,7 +1003,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
return -EINVAL;
} else {
- if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+ q = qdisc_lookup(dev, tcm->tcm_handle);
+ if (!q)
return -ENOENT;
}
@@ -1008,7 +1016,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -EINVAL;
if (q->handle == 0)
return -ENOENT;
- if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
+ err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+ if (err != 0)
return err;
} else {
qdisc_notify(net, skb, n, clid, NULL, q);
@@ -1017,7 +1026,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
}
/*
- Create/change qdisc.
+ * Create/change qdisc.
*/
static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
@@ -1036,7 +1045,8 @@ replay:
clid = tcm->tcm_parent;
q = p = NULL;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1046,12 +1056,12 @@ replay:
if (clid) {
if (clid != TC_H_ROOT) {
if (clid != TC_H_INGRESS) {
- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+ p = qdisc_lookup(dev, TC_H_MAJ(clid));
+ if (!p)
return -ENOENT;
q = qdisc_leaf(p, clid);
- } else { /* ingress */
- if (dev_ingress_queue_create(dev))
- q = dev_ingress_queue(dev)->qdisc_sleeping;
+ } else if (dev_ingress_queue_create(dev)) {
+ q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -1063,13 +1073,14 @@ replay:
if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
if (tcm->tcm_handle) {
- if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
+ if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
return -EEXIST;
if (TC_H_MIN(tcm->tcm_handle))
return -EINVAL;
- if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+ q = qdisc_lookup(dev, tcm->tcm_handle);
+ if (!q)
goto create_n_graft;
- if (n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
return -EINVAL;
@@ -1079,7 +1090,7 @@ replay:
atomic_inc(&q->refcnt);
goto graft;
} else {
- if (q == NULL)
+ if (!q)
goto create_n_graft;
/* This magic test requires explanation.
@@ -1101,9 +1112,9 @@ replay:
* For now we select create/graft, if
* user gave KIND, which does not match existing.
*/
- if ((n->nlmsg_flags&NLM_F_CREATE) &&
- (n->nlmsg_flags&NLM_F_REPLACE) &&
- ((n->nlmsg_flags&NLM_F_EXCL) ||
+ if ((n->nlmsg_flags & NLM_F_CREATE) &&
+ (n->nlmsg_flags & NLM_F_REPLACE) &&
+ ((n->nlmsg_flags & NLM_F_EXCL) ||
(tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id))))
goto create_n_graft;
@@ -1118,7 +1129,7 @@ replay:
/* Change qdisc parameters */
if (q == NULL)
return -ENOENT;
- if (n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
return -EINVAL;
@@ -1128,7 +1139,7 @@ replay:
return err;
create_n_graft:
- if (!(n->nlmsg_flags&NLM_F_CREATE))
+ if (!(n->nlmsg_flags & NLM_F_CREATE))
return -ENOENT;
if (clid == TC_H_INGRESS) {
if (dev_ingress_queue(dev))
@@ -1175,6 +1186,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
struct gnet_dump d;
+ struct qdisc_size_table *stab;
nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
tcm = NLMSG_DATA(nlh);
@@ -1190,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
goto nla_put_failure;
q->qstats.qlen = q->q.qlen;
- if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
+ stab = rtnl_dereference(q->stab);
+ if (stab && qdisc_dump_stab(skb, stab) < 0)
goto nla_put_failure;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
@@ -1234,16 +1247,19 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
return -ENOBUFS;
if (old && !tc_qdisc_dump_ignore(old)) {
- if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
+ if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
+ 0, RTM_DELQDISC) < 0)
goto err_out;
}
if (new && !tc_qdisc_dump_ignore(new)) {
- if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
+ if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
+ old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
goto err_out;
}
if (skb->len)
- return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
err_out:
kfree_skb(skb);
@@ -1275,7 +1291,7 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
q_idx++;
continue;
}
- if (!tc_qdisc_dump_ignore(q) &&
+ if (!tc_qdisc_dump_ignore(q) &&
tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
goto done;
@@ -1356,7 +1372,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
u32 qid = TC_H_MAJ(clid);
int err;
- if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return -ENODEV;
err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1391,9 +1408,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
qid = dev->qdisc->handle;
/* Now qid is genuine qdisc handle consistent
- both with parent and child.
-
- TC_H_MAJ(pid) still may be unspecified, complete it now.
+ * both with parent and child.
+ *
+ * TC_H_MAJ(pid) still may be unspecified, complete it now.
*/
if (pid)
pid = TC_H_MAKE(qid, pid);
@@ -1403,7 +1420,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
}
/* OK. Locate qdisc */
- if ((q = qdisc_lookup(dev, qid)) == NULL)
+ q = qdisc_lookup(dev, qid);
+ if (!q)
return -ENOENT;
/* And check that it supports classes */
@@ -1423,13 +1441,14 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
if (cl == 0) {
err = -ENOENT;
- if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
+ if (n->nlmsg_type != RTM_NEWTCLASS ||
+ !(n->nlmsg_flags & NLM_F_CREATE))
goto out;
} else {
switch (n->nlmsg_type) {
case RTM_NEWTCLASS:
err = -EEXIST;
- if (n->nlmsg_flags&NLM_F_EXCL)
+ if (n->nlmsg_flags & NLM_F_EXCL)
goto out;
break;
case RTM_DELTCLASS:
@@ -1521,14 +1540,14 @@ static int tclass_notify(struct net *net, struct sk_buff *oskb,
return -EINVAL;
}
- return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+ return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
}
-struct qdisc_dump_args
-{
- struct qdisc_walker w;
- struct sk_buff *skb;
- struct netlink_callback *cb;
+struct qdisc_dump_args {
+ struct qdisc_walker w;
+ struct sk_buff *skb;
+ struct netlink_callback *cb;
};
static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
@@ -1590,7 +1609,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
+ struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
struct net *net = sock_net(skb->sk);
struct netdev_queue *dev_queue;
struct net_device *dev;
@@ -1598,7 +1617,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
return 0;
- if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+ dev = dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
return 0;
s_t = cb->args[0];
@@ -1621,19 +1641,22 @@ done:
}
/* Main classifier routine: scans classifier chain attached
- to this qdisc, (optionally) tests for protocol and asks
- specific classifiers.
+ * to this qdisc, (optionally) tests for protocol and asks
+ * specific classifiers.
*/
int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
struct tcf_result *res)
{
__be16 protocol = skb->protocol;
- int err = 0;
+ int err;
for (; tp; tp = tp->next) {
- if ((tp->protocol == protocol ||
- tp->protocol == htons(ETH_P_ALL)) &&
- (err = tp->classify(skb, tp, res)) >= 0) {
+ if (tp->protocol != protocol &&
+ tp->protocol != htons(ETH_P_ALL))
+ continue;
+ err = tp->classify(skb, tp, res);
+
+ if (err >= 0) {
#ifdef CONFIG_NET_CLS_ACT
if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
@@ -1664,11 +1687,11 @@ reclassify:
if (verd++ >= MAX_REC_LOOP) {
if (net_ratelimit())
- printk(KERN_NOTICE
- "%s: packet reclassify loop"
+ pr_notice("%s: packet reclassify loop"
" rule prio %u protocol %02x\n",
- tp->q->ops->id,
- tp->prio & 0xffff, ntohs(tp->protocol));
+ tp->q->ops->id,
+ tp->prio & 0xffff,
+ ntohs(tp->protocol));
return TC_ACT_SHOT;
}
skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
@@ -1761,7 +1784,7 @@ static int __init pktsched_init(void)
err = register_pernet_subsys(&psched_net_ops);
if (err) {
- printk(KERN_ERR "pktsched_init: "
+ pr_err("pktsched_init: "
"cannot initialize per netns operations\n");
return err;
}
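A note on the sch_api.c hunks above: they are not purely cosmetic. Direct reads of q->stab become rtnl_dereference(q->stab), annotating the size-table pointer as RCU-managed while still relying on the RTNL lock for serialization. A minimal sketch of the access pattern, not part of the patch (use_stab() and consume_stab() are hypothetical names):

	static void use_stab(struct Qdisc *q)
	{
		struct qdisc_size_table *stab;

		ASSERT_RTNL();				/* reader must hold RTNL */
		stab = rtnl_dereference(q->stab);	/* lockdep/sparse-checked load */
		if (stab)
			consume_stab(stab);		/* hypothetical consumer */
	}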
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 943d733..3f08158 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -319,7 +319,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
* creation), and one for the reference held when calling delete.
*/
if (flow->ref < 2) {
- printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
+ pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
return -EINVAL;
}
if (flow->ref > 2)
@@ -384,12 +384,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
}
flow = NULL;
- done:
- ;
+done:
+ ;
}
- if (!flow)
+ if (!flow) {
flow = &p->link;
- else {
+ } else {
if (flow->vcc)
ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
/*@@@ looks good ... but it's not supposed to work :-) */
@@ -576,8 +576,7 @@ static void atm_tc_destroy(struct Qdisc *sch)
list_for_each_entry_safe(flow, tmp, &p->flows, list) {
if (flow->ref > 1)
- printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
- flow->ref);
+ pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
atm_tc_put(sch, (unsigned long)flow);
}
tasklet_kill(&p->task);
@@ -616,9 +615,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
}
if (flow->excess)
NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
- else {
+ else
NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
- }
nla_nest_end(skb, nest);
return skb->len;
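The printk(KERN_ERR ...) to pr_err(...) conversions in this file are mechanical: pr_err(fmt, ...) expands to printk(KERN_ERR pr_fmt(fmt), ...), which lets a file prefix all of its messages in one place. A sketch of the idiom (this patch does not add a pr_fmt definition; the define below is illustrative only):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the #includes */

	pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
	/* would then log "sch_atm: atm_tc_delete: flow->ref == 2" at KERN_ERR */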
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 5f63ec5..24d94c0 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -72,8 +72,7 @@
struct cbq_sched_data;
-struct cbq_class
-{
+struct cbq_class {
struct Qdisc_class_common common;
struct cbq_class *next_alive; /* next class with backlog in this priority band */
@@ -139,19 +138,18 @@ struct cbq_class
int refcnt;
int filters;
- struct cbq_class *defaults[TC_PRIO_MAX+1];
+ struct cbq_class *defaults[TC_PRIO_MAX + 1];
};
-struct cbq_sched_data
-{
+struct cbq_sched_data {
struct Qdisc_class_hash clhash; /* Hash table of all classes */
- int nclasses[TC_CBQ_MAXPRIO+1];
- unsigned quanta[TC_CBQ_MAXPRIO+1];
+ int nclasses[TC_CBQ_MAXPRIO + 1];
+ unsigned int quanta[TC_CBQ_MAXPRIO + 1];
struct cbq_class link;
- unsigned activemask;
- struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes
+ unsigned int activemask;
+ struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
with backlog */
#ifdef CONFIG_NET_CLS_ACT
@@ -162,7 +160,7 @@ struct cbq_sched_data
int tx_len;
psched_time_t now; /* Cached timestamp */
psched_time_t now_rt; /* Cached real time */
- unsigned pmask;
+ unsigned int pmask;
struct hrtimer delay_timer;
struct qdisc_watchdog watchdog; /* Watchdog timer,
@@ -175,9 +173,9 @@ struct cbq_sched_data
};
-#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len)
+#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
struct Qdisc_class_common *clc;
@@ -193,25 +191,27 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
- struct cbq_class *cl, *new;
+ struct cbq_class *cl;
- for (cl = this->tparent; cl; cl = cl->tparent)
- if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
- return new;
+ for (cl = this->tparent; cl; cl = cl->tparent) {
+ struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
+ if (new != NULL && new != this)
+ return new;
+ }
return NULL;
}
#endif
/* Classify packet. The procedure is pretty complicated, but
- it allows us to combine link sharing and priority scheduling
- transparently.
-
- Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
- so that it resolves to split nodes. Then packets are classified
- by logical priority, or a more specific classifier may be attached
- to the split node.
+ * it allows us to combine link sharing and priority scheduling
+ * transparently.
+ *
+ * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
+ * so that it resolves to split nodes. Then packets are classified
+ * by logical priority, or a more specific classifier may be attached
+ * to the split node.
*/
static struct cbq_class *
@@ -227,7 +227,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
/*
* Step 1. If skb->priority points to one of our classes, use it.
*/
- if (TC_H_MAJ(prio^sch->handle) == 0 &&
+ if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
(cl = cbq_class_lookup(q, prio)) != NULL)
return cl;
@@ -243,10 +243,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
(result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
goto fallback;
- if ((cl = (void*)res.class) == NULL) {
+ cl = (void *)res.class;
+ if (!cl) {
if (TC_H_MAJ(res.classid))
cl = cbq_class_lookup(q, res.classid);
- else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
+ else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
cl = defmap[TC_PRIO_BESTEFFORT];
if (cl == NULL || cl->level >= head->level)
@@ -282,7 +283,7 @@ fallback:
* Step 4. No success...
*/
if (TC_H_MAJ(prio) == 0 &&
- !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
+ !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
!(cl = head->defaults[TC_PRIO_BESTEFFORT]))
return head;
@@ -290,12 +291,12 @@ fallback:
}
/*
- A packet has just been enqueued on the empty class.
- cbq_activate_class adds it to the tail of active class list
- of its priority band.
+ * A packet has just been enqueued on the empty class.
+ * cbq_activate_class adds it to the tail of active class list
+ * of its priority band.
*/
-static __inline__ void cbq_activate_class(struct cbq_class *cl)
+static inline void cbq_activate_class(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
int prio = cl->cpriority;
@@ -314,9 +315,9 @@ static __inline__ void cbq_activate_class(struct cbq_class *cl)
}
/*
- Unlink class from active chain.
- Note that this same procedure is done directly in cbq_dequeue*
- during round-robin procedure.
+ * Unlink class from active chain.
+ * Note that this same procedure is done directly in cbq_dequeue*
+ * during round-robin procedure.
*/
static void cbq_deactivate_class(struct cbq_class *this)
@@ -350,7 +351,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
int toplevel = q->toplevel;
- if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
+ if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
psched_time_t now;
psched_tdiff_t incr;
@@ -363,7 +364,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
q->toplevel = cl->level;
return;
}
- } while ((cl=cl->borrow) != NULL && toplevel > cl->level);
+ } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
}
}
@@ -417,11 +418,11 @@ static void cbq_ovl_classic(struct cbq_class *cl)
delay += cl->offtime;
/*
- Class goes to sleep, so that it will have no
- chance to work avgidle. Let's forgive it 8)
-
- BTW cbq-2.0 has a crap in this
- place, apparently they forgot to shift it by cl->ewma_log.
+ * Class goes to sleep, so that it will have no
+ * chance to work avgidle. Let's forgive it 8)
+ *
+ * BTW cbq-2.0 has a crap in this
+ * place, apparently they forgot to shift it by cl->ewma_log.
*/
if (cl->avgidle < 0)
delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
@@ -438,8 +439,8 @@ static void cbq_ovl_classic(struct cbq_class *cl)
q->wd_expires = delay;
/* Dirty work! We must schedule wakeups based on
- real available rate, rather than leaf rate,
- which may be tiny (even zero).
+ * real available rate, rather than leaf rate,
+ * which may be tiny (even zero).
*/
if (q->toplevel == TC_CBQ_MAXLEVEL) {
struct cbq_class *b;
@@ -459,7 +460,7 @@ static void cbq_ovl_classic(struct cbq_class *cl)
}
/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
- they go overlimit
+ * they go overlimit
*/
static void cbq_ovl_rclassic(struct cbq_class *cl)
@@ -594,7 +595,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
struct Qdisc *sch = q->watchdog.qdisc;
psched_time_t now;
psched_tdiff_t delay = 0;
- unsigned pmask;
+ unsigned int pmask;
now = psched_get_time();
@@ -623,7 +624,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
}
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
__netif_schedule(qdisc_root(sch));
return HRTIMER_NORESTART;
}
@@ -663,15 +664,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
#endif
/*
- It is mission critical procedure.
-
- We "regenerate" toplevel cutoff, if transmitting class
- has backlog and it is not regulated. It is not part of
- original CBQ description, but looks more reasonable.
- Probably, it is wrong. This question needs further investigation.
-*/
+ * It is a mission-critical procedure.
+ *
+ * We "regenerate" toplevel cutoff, if transmitting class
+ * has backlog and it is not regulated. It is not part of
+ * original CBQ description, but looks more reasonable.
+ * Probably, it is wrong. This question needs further investigation.
+ */
-static __inline__ void
+static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
struct cbq_class *borrowed)
{
@@ -682,7 +683,7 @@ cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
q->toplevel = borrowed->level;
return;
}
- } while ((borrowed=borrowed->borrow) != NULL);
+ } while ((borrowed = borrowed->borrow) != NULL);
}
#if 0
/* It is not necessary now. Uncommenting it
@@ -710,10 +711,10 @@ cbq_update(struct cbq_sched_data *q)
cl->bstats.bytes += len;
/*
- (now - last) is total time between packet right edges.
- (last_pktlen/rate) is "virtual" busy time, so that
-
- idle = (now - last) - last_pktlen/rate
+ * (now - last) is total time between packet right edges.
+ * (last_pktlen/rate) is "virtual" busy time, so that
+ *
+ * idle = (now - last) - last_pktlen/rate
*/
idle = q->now - cl->last;
@@ -723,9 +724,9 @@ cbq_update(struct cbq_sched_data *q)
idle -= L2T(cl, len);
/* true_avgidle := (1-W)*true_avgidle + W*idle,
- where W=2^{-ewma_log}. But cl->avgidle is scaled:
- cl->avgidle == true_avgidle/W,
- hence:
+ * where W=2^{-ewma_log}. But cl->avgidle is scaled:
+ * cl->avgidle == true_avgidle/W,
+ * hence:
*/
avgidle += idle - (avgidle>>cl->ewma_log);
}
@@ -739,22 +740,22 @@ cbq_update(struct cbq_sched_data *q)
cl->avgidle = avgidle;
/* Calculate expected time, when this class
- will be allowed to send.
- It will occur, when:
- (1-W)*true_avgidle + W*delay = 0, i.e.
- idle = (1/W - 1)*(-true_avgidle)
- or
- idle = (1 - W)*(-cl->avgidle);
+ * will be allowed to send.
+ * It will occur, when:
+ * (1-W)*true_avgidle + W*delay = 0, i.e.
+ * idle = (1/W - 1)*(-true_avgidle)
+ * or
+ * idle = (1 - W)*(-cl->avgidle);
*/
idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
/*
- That is not all.
- To maintain the rate allocated to the class,
- we add to undertime virtual clock,
- necessary to complete transmitted packet.
- (len/phys_bandwidth has been already passed
- to the moment of cbq_update)
+ * That is not all.
+ * To maintain the rate allocated to the class,
+ * we add to undertime virtual clock,
+ * necessary to complete transmitted packet.
+ * (len/phys_bandwidth has been already passed
+ * to the moment of cbq_update)
*/
idle -= L2T(&q->link, len);
@@ -776,7 +777,7 @@ cbq_update(struct cbq_sched_data *q)
cbq_update_toplevel(q, this, q->tx_borrowed);
}
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
@@ -792,16 +793,17 @@ cbq_under_limit(struct cbq_class *cl)
do {
/* It is very suspicious place. Now overlimit
- action is generated for not bounded classes
- only if link is completely congested.
- Though it is in agree with ancestor-only paradigm,
- it looks very stupid. Particularly,
- it means that this chunk of code will either
- never be called or result in strong amplification
- of burstiness. Dangerous, silly, and, however,
- no another solution exists.
+ * action is generated for not bounded classes
+ * only if link is completely congested.
+ * Though it is in agreement with the ancestor-only paradigm,
+ * it looks very stupid. Particularly,
+ * it means that this chunk of code will either
+ * never be called or result in strong amplification
+ * of burstiness. Dangerous, silly, and, however,
+ * no other solution exists.
*/
- if ((cl = cl->borrow) == NULL) {
+ cl = cl->borrow;
+ if (!cl) {
this_cl->qstats.overlimits++;
this_cl->overlimit(this_cl);
return NULL;
@@ -814,7 +816,7 @@ cbq_under_limit(struct cbq_class *cl)
return cl;
}
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
struct cbq_sched_data *q = qdisc_priv(sch);
@@ -838,7 +840,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
if (cl->deficit <= 0) {
/* Class exhausted its allotment per
- this round. Switch to the next one.
+ * this round. Switch to the next one.
*/
deficit = 1;
cl->deficit += cl->quantum;
@@ -848,8 +850,8 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
skb = cl->q->dequeue(cl->q);
/* Class did not give us any skb :-(
- It could occur even if cl->q->q.qlen != 0
- f.e. if cl->q == "tbf"
+ * It could occur even if cl->q->q.qlen != 0
+ * f.e. if cl->q == "tbf"
*/
if (skb == NULL)
goto skip_class;
@@ -878,7 +880,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
skip_class:
if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
/* Class is empty or penalized.
- Unlink it from active chain.
+ * Unlink it from active chain.
*/
cl_prev->next_alive = cl->next_alive;
cl->next_alive = NULL;
@@ -917,14 +919,14 @@ next_class:
return NULL;
}
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- unsigned activemask;
+ unsigned int activemask;
- activemask = q->activemask&0xFF;
+ activemask = q->activemask & 0xFF;
while (activemask) {
int prio = ffz(~activemask);
activemask &= ~(1<<prio);
@@ -949,11 +951,11 @@ cbq_dequeue(struct Qdisc *sch)
if (q->tx_class) {
psched_tdiff_t incr2;
/* Time integrator. We calculate EOS time
- by adding expected packet transmission time.
- If real time is greater, we warp artificial clock,
- so that:
-
- cbq_time = max(real_time, work);
+ * by adding expected packet transmission time.
+ * If real time is greater, we warp artificial clock,
+ * so that:
+ *
+ * cbq_time = max(real_time, work);
*/
incr2 = L2T(&q->link, q->tx_len);
q->now += incr2;
@@ -971,27 +973,27 @@ cbq_dequeue(struct Qdisc *sch)
if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
return skb;
}
/* All the classes are overlimit.
-
- It is possible, if:
-
- 1. Scheduler is empty.
- 2. Toplevel cutoff inhibited borrowing.
- 3. Root class is overlimit.
-
- Reset 2d and 3d conditions and retry.
-
- Note, that NS and cbq-2.0 are buggy, peeking
- an arbitrary class is appropriate for ancestor-only
- sharing, but not for toplevel algorithm.
-
- Our version is better, but slower, because it requires
- two passes, but it is unavoidable with top-level sharing.
- */
+ *
+ * It is possible, if:
+ *
+ * 1. Scheduler is empty.
+ * 2. Toplevel cutoff inhibited borrowing.
+ * 3. Root class is overlimit.
+ *
+ * Reset 2d and 3d conditions and retry.
+ *
+ * Note, that NS and cbq-2.0 are buggy, peeking
+ * an arbitrary class is appropriate for ancestor-only
+ * sharing, but not for toplevel algorithm.
+ *
+ * Our version is better, but slower, because it requires
+ * two passes, but it is unavoidable with top-level sharing.
+ */
if (q->toplevel == TC_CBQ_MAXLEVEL &&
q->link.undertime == PSCHED_PASTPERFECT)
@@ -1002,7 +1004,8 @@ cbq_dequeue(struct Qdisc *sch)
}
/* No packets in scheduler or nobody wants to give them to us :-(
- Sigh... start watchdog timer in the last case. */
+ * Sigh... start watchdog timer in the last case.
+ */
if (sch->q.qlen) {
sch->qstats.overlimits++;
@@ -1024,13 +1027,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
int level = 0;
struct cbq_class *cl;
- if ((cl = this->children) != NULL) {
+ cl = this->children;
+ if (cl) {
do {
if (cl->level > level)
level = cl->level;
} while ((cl = cl->sibling) != this->children);
}
- this->level = level+1;
+ this->level = level + 1;
} while ((this = this->tparent) != NULL);
}
@@ -1046,14 +1050,15 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
/* BUGGGG... Beware! This expression suffers from
- arithmetic overflows!
+ * arithmetic overflows!
*/
if (cl->priority == prio) {
cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
q->quanta[prio];
}
if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
- printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+ pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+ cl->common.classid, cl->quantum);
cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
}
}
@@ -1064,18 +1069,18 @@ static void cbq_sync_defmap(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *split = cl->split;
- unsigned h;
+ unsigned int h;
int i;
if (split == NULL)
return;
- for (i=0; i<=TC_PRIO_MAX; i++) {
- if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
+ for (i = 0; i <= TC_PRIO_MAX; i++) {
+ if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
split->defaults[i] = NULL;
}
- for (i=0; i<=TC_PRIO_MAX; i++) {
+ for (i = 0; i <= TC_PRIO_MAX; i++) {
int level = split->level;
if (split->defaults[i])
@@ -1088,7 +1093,7 @@ static void cbq_sync_defmap(struct cbq_class *cl)
hlist_for_each_entry(c, n, &q->clhash.hash[h],
common.hnode) {
if (c->split == split && c->level < level &&
- c->defmap&(1<<i)) {
+ c->defmap & (1<<i)) {
split->defaults[i] = c;
level = c->level;
}
@@ -1102,7 +1107,8 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
struct cbq_class *split = NULL;
if (splitid == 0) {
- if ((split = cl->split) == NULL)
+ split = cl->split;
+ if (!split)
return;
splitid = split->common.classid;
}
@@ -1120,9 +1126,9 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
cl->defmap = 0;
cbq_sync_defmap(cl);
cl->split = split;
- cl->defmap = def&mask;
+ cl->defmap = def & mask;
} else
- cl->defmap = (cl->defmap&~mask)|(def&mask);
+ cl->defmap = (cl->defmap & ~mask) | (def & mask);
cbq_sync_defmap(cl);
}
@@ -1135,7 +1141,7 @@ static void cbq_unlink_class(struct cbq_class *this)
qdisc_class_hash_remove(&q->clhash, &this->common);
if (this->tparent) {
- clp=&this->sibling;
+ clp = &this->sibling;
cl = *clp;
do {
if (cl == this) {
@@ -1174,7 +1180,7 @@ static void cbq_link_class(struct cbq_class *this)
}
}
-static unsigned int cbq_drop(struct Qdisc* sch)
+static unsigned int cbq_drop(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl, *cl_head;
@@ -1182,7 +1188,8 @@ static unsigned int cbq_drop(struct Qdisc* sch)
unsigned int len;
for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
- if ((cl_head = q->active[prio]) == NULL)
+ cl_head = q->active[prio];
+ if (!cl_head)
continue;
cl = cl_head;
@@ -1199,13 +1206,13 @@ static unsigned int cbq_drop(struct Qdisc* sch)
}
static void
-cbq_reset(struct Qdisc* sch)
+cbq_reset(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
struct hlist_node *n;
int prio;
- unsigned h;
+ unsigned int h;
q->activemask = 0;
q->pmask = 0;
@@ -1237,21 +1244,21 @@ cbq_reset(struct Qdisc* sch)
static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
- if (lss->change&TCF_CBQ_LSS_FLAGS) {
- cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
- cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+ if (lss->change & TCF_CBQ_LSS_FLAGS) {
+ cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
+ cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
}
- if (lss->change&TCF_CBQ_LSS_EWMA)
+ if (lss->change & TCF_CBQ_LSS_EWMA)
cl->ewma_log = lss->ewma_log;
- if (lss->change&TCF_CBQ_LSS_AVPKT)
+ if (lss->change & TCF_CBQ_LSS_AVPKT)
cl->avpkt = lss->avpkt;
- if (lss->change&TCF_CBQ_LSS_MINIDLE)
+ if (lss->change & TCF_CBQ_LSS_MINIDLE)
cl->minidle = -(long)lss->minidle;
- if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
+ if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
cl->maxidle = lss->maxidle;
cl->avgidle = lss->maxidle;
}
- if (lss->change&TCF_CBQ_LSS_OFFTIME)
+ if (lss->change & TCF_CBQ_LSS_OFFTIME)
cl->offtime = lss->offtime;
return 0;
}
@@ -1279,10 +1286,10 @@ static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
if (wrr->weight)
cl->weight = wrr->weight;
if (wrr->priority) {
- cl->priority = wrr->priority-1;
+ cl->priority = wrr->priority - 1;
cl->cpriority = cl->priority;
if (cl->priority >= cl->priority2)
- cl->priority2 = TC_CBQ_MAXPRIO-1;
+ cl->priority2 = TC_CBQ_MAXPRIO - 1;
}
cbq_addprio(q, cl);
@@ -1299,10 +1306,10 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
cl->overlimit = cbq_ovl_delay;
break;
case TC_CBQ_OVL_LOWPRIO:
- if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
- ovl->priority2-1 <= cl->priority)
+ if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
+ ovl->priority2 - 1 <= cl->priority)
return -EINVAL;
- cl->priority2 = ovl->priority2-1;
+ cl->priority2 = ovl->priority2 - 1;
cl->overlimit = cbq_ovl_lowprio;
break;
case TC_CBQ_OVL_DROP:
@@ -1381,9 +1388,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
if (!q->link.q)
q->link.q = &noop_qdisc;
- q->link.priority = TC_CBQ_MAXPRIO-1;
- q->link.priority2 = TC_CBQ_MAXPRIO-1;
- q->link.cpriority = TC_CBQ_MAXPRIO-1;
+ q->link.priority = TC_CBQ_MAXPRIO - 1;
+ q->link.priority2 = TC_CBQ_MAXPRIO - 1;
+ q->link.cpriority = TC_CBQ_MAXPRIO - 1;
q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
q->link.overlimit = cbq_ovl_classic;
q->link.allot = psched_mtu(qdisc_dev(sch));
@@ -1414,7 +1421,7 @@ put_rtab:
return err;
}
-static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
@@ -1426,7 +1433,7 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_lssopt opt;
@@ -1451,15 +1458,15 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_wrropt opt;
opt.flags = 0;
opt.allot = cl->allot;
- opt.priority = cl->priority+1;
- opt.cpriority = cl->cpriority+1;
+ opt.priority = cl->priority + 1;
+ opt.cpriority = cl->cpriority + 1;
opt.weight = cl->weight;
NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
return skb->len;
@@ -1469,13 +1476,13 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_ovl opt;
opt.strategy = cl->ovl_strategy;
- opt.priority2 = cl->priority2+1;
+ opt.priority2 = cl->priority2 + 1;
opt.pad = 0;
opt.penalty = cl->penalty;
NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
@@ -1486,7 +1493,7 @@ nla_put_failure:
return -1;
}
-static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_fopt opt;
@@ -1505,7 +1512,7 @@ nla_put_failure:
}
#ifdef CONFIG_NET_CLS_ACT
-static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_police opt;
@@ -1569,7 +1576,7 @@ static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
struct nlattr *nest;
if (cl->tparent)
@@ -1597,7 +1604,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct gnet_dump *d)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
cl->qstats.qlen = cl->q->q.qlen;
cl->xstats.avgidle = cl->avgidle;
@@ -1617,7 +1624,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
if (new == NULL) {
new = qdisc_create_dflt(sch->dev_queue,
@@ -1640,10 +1647,9 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
return 0;
}
-static struct Qdisc *
-cbq_leaf(struct Qdisc *sch, unsigned long arg)
+static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
return cl->q;
}
@@ -1682,13 +1688,12 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
kfree(cl);
}
-static void
-cbq_destroy(struct Qdisc* sch)
+static void cbq_destroy(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct hlist_node *n, *next;
struct cbq_class *cl;
- unsigned h;
+ unsigned int h;
#ifdef CONFIG_NET_CLS_ACT
q->rx_class = NULL;
@@ -1712,7 +1717,7 @@ cbq_destroy(struct Qdisc* sch)
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
@@ -1735,7 +1740,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
{
int err;
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class*)*arg;
+ struct cbq_class *cl = (struct cbq_class *)*arg;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_CBQ_MAX + 1];
struct cbq_class *parent;
@@ -1827,13 +1832,14 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (classid) {
err = -EINVAL;
- if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
+ if (TC_H_MAJ(classid ^ sch->handle) ||
+ cbq_class_lookup(q, classid))
goto failure;
} else {
int i;
- classid = TC_H_MAKE(sch->handle,0x8000);
+ classid = TC_H_MAKE(sch->handle, 0x8000);
- for (i=0; i<0x8000; i++) {
+ for (i = 0; i < 0x8000; i++) {
if (++q->hgenerator >= 0x8000)
q->hgenerator = 1;
if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
@@ -1890,11 +1896,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
cl->minidle = -0x7FFFFFFF;
cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
- if (cl->ewma_log==0)
+ if (cl->ewma_log == 0)
cl->ewma_log = q->link.ewma_log;
- if (cl->maxidle==0)
+ if (cl->maxidle == 0)
cl->maxidle = q->link.maxidle;
- if (cl->avpkt==0)
+ if (cl->avpkt == 0)
cl->avpkt = q->link.avpkt;
cl->overlimit = cbq_ovl_classic;
if (tb[TCA_CBQ_OVL_STRATEGY])
@@ -1920,7 +1926,7 @@ failure:
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
unsigned int qlen;
if (cl->filters || cl->children || cl == &q->link)
@@ -1978,7 +1984,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
struct cbq_sched_data *q = qdisc_priv(sch);
- struct cbq_class *p = (struct cbq_class*)parent;
+ struct cbq_class *p = (struct cbq_class *)parent;
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
@@ -1992,7 +1998,7 @@ static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
- struct cbq_class *cl = (struct cbq_class*)arg;
+ struct cbq_class *cl = (struct cbq_class *)arg;
cl->filters--;
}
@@ -2002,7 +2008,7 @@ static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
struct hlist_node *n;
- unsigned h;
+ unsigned int h;
if (arg->stop)
return;
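The avgidle comments reflowed above compress a small derivation that is worth spelling out. The EWMA step is true_avgidle' = (1 - W) * true_avgidle + W * idle with W = 2^(-ewma_log), and cl->avgidle stores the scaled value avgidle = true_avgidle / W. Dividing the EWMA step by W:

	avgidle' = (1 - W) * avgidle + idle
	         = avgidle + idle - W * avgidle

and since W * avgidle is exactly avgidle >> ewma_log for the scaled integer, this is precisely the line the patch reindents:

	avgidle += idle - (avgidle >> cl->ewma_log);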
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
new file mode 100644
index 0000000..ee1e209
--- /dev/null
+++ b/net/sched/sch_choke.c
@@ -0,0 +1,677 @@
+/*
+ * net/sched/sch_choke.c CHOKE scheduler
+ *
+ * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
+ * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/reciprocal_div.h>
+#include <linux/vmalloc.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <net/red.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+
+/*
+ CHOKe stateless AQM for fair bandwidth allocation
+ =================================================
+
+ CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
+ unresponsive flows) is a variant of RED that penalizes misbehaving flows but
+ maintains no flow state. The difference from RED is an additional step
+ during the enqueuing process. If average queue size is over the
+ low threshold (qmin), a packet is chosen at random from the queue.
+ If both the new and chosen packet are from the same flow, both
+ are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
+ needs to access packets in queue randomly. It has a minimal class
+ interface to allow overriding the builtin flow classifier with
+ filters.
+
+ Source:
+ R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
+ Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
+ IEEE INFOCOM, 2000.
+
+ A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
+ Characteristics", IEEE/ACM Transactions on Networking, 2004
+
+ */
+
+/* Upper bound on size of sk_buff table (packets) */
+#define CHOKE_MAX_QUEUE (128*1024 - 1)
+
+struct choke_sched_data {
+/* Parameters */
+ u32 limit;
+ unsigned char flags;
+
+ struct red_parms parms;
+
+/* Variables */
+ struct tcf_proto *filter_list;
+ struct {
+ u32 prob_drop; /* Early probability drops */
+ u32 prob_mark; /* Early probability marks */
+ u32 forced_drop; /* Forced drops, qavg > max_thresh */
+ u32 forced_mark; /* Forced marks, qavg > max_thresh */
+ u32 pdrop; /* Drops due to queue limits */
+ u32 other; /* Drops due to drop() calls */
+ u32 matched; /* Drops to flow match */
+ } stats;
+
+ unsigned int head;
+ unsigned int tail;
+
+ unsigned int tab_mask; /* size - 1 */
+
+ struct sk_buff **tab;
+};
+
+/* deliver a random number between 0 and N - 1 */
+static u32 random_N(unsigned int N)
+{
+ return reciprocal_divide(random32(), N);
+}
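A note on the helper above, assuming the contemporary definition of reciprocal_divide():

	/* reciprocal_divide(a, b) == (u32)(((u64)a * b) >> 32), so
	 * random_N(N) == floor(random32() * N / 2^32): a near-uniform
	 * value in [0, N) computed without a hardware division.
	 */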
+
+/* number of elements in queue including holes */
+static unsigned int choke_len(const struct choke_sched_data *q)
+{
+ return (q->tail - q->head) & q->tab_mask;
+}
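Why the masked subtraction is safe, for readers new to power-of-two rings:

	/* head and tail advance modulo 2^32; because the table size is a
	 * power of two (tab_mask == size - 1, set up in choke_change()),
	 * (tail - head) & tab_mask is the occupancy even across wraparound.
	 */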
+
+/* Is ECN parameter configured */
+static int use_ecn(const struct choke_sched_data *q)
+{
+ return q->flags & TC_RED_ECN;
+}
+
+/* Should packets over max just be dropped (versus marked) */
+static int use_harddrop(const struct choke_sched_data *q)
+{
+ return q->flags & TC_RED_HARDDROP;
+}
+
+/* Move head pointer forward to skip over holes */
+static void choke_zap_head_holes(struct choke_sched_data *q)
+{
+ do {
+ q->head = (q->head + 1) & q->tab_mask;
+ if (q->head == q->tail)
+ break;
+ } while (q->tab[q->head] == NULL);
+}
+
+/* Move tail pointer backwards to reuse holes */
+static void choke_zap_tail_holes(struct choke_sched_data *q)
+{
+ do {
+ q->tail = (q->tail - 1) & q->tab_mask;
+ if (q->head == q->tail)
+ break;
+ } while (q->tab[q->tail] == NULL);
+}
+
+/* Drop packet from queue array by creating a "hole" */
+static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb = q->tab[idx];
+
+ q->tab[idx] = NULL;
+
+ if (idx == q->head)
+ choke_zap_head_holes(q);
+ if (idx == q->tail)
+ choke_zap_tail_holes(q);
+
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_drop(skb, sch);
+ qdisc_tree_decrease_qlen(sch, 1);
+ --sch->q.qlen;
+}
+
+/*
+ * Compare flow of two packets
+ * Returns true only if source and destination address and port match.
+ * false for special cases
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+ struct sk_buff *skb2)
+{
+ int off1, off2, poff;
+ const u32 *ports1, *ports2;
+ u8 ip_proto;
+ __u32 hash1;
+
+ if (skb1->protocol != skb2->protocol)
+ return false;
+
+ /* Use hash value as quick check
+ * Assumes that __skb_get_rxhash makes IP header and ports linear
+ */
+ hash1 = skb_get_rxhash(skb1);
+ if (!hash1 || hash1 != skb_get_rxhash(skb2))
+ return false;
+
+ /* Probably match, but be sure to avoid hash collisions */
+ off1 = skb_network_offset(skb1);
+ off2 = skb_network_offset(skb2);
+
+ switch (skb1->protocol) {
+ case __constant_htons(ETH_P_IP): {
+ const struct iphdr *ip1, *ip2;
+
+ ip1 = (const struct iphdr *) (skb1->data + off1);
+ ip2 = (const struct iphdr *) (skb2->data + off2);
+
+ ip_proto = ip1->protocol;
+ if (ip_proto != ip2->protocol ||
+ ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
+ return false;
+
+ if ((ip1->frag_off | ip2->frag_off) & htons(IP_MF | IP_OFFSET))
+ ip_proto = 0;
+ off1 += ip1->ihl * 4;
+ off2 += ip2->ihl * 4;
+ break;
+ }
+
+ case __constant_htons(ETH_P_IPV6): {
+ const struct ipv6hdr *ip1, *ip2;
+
+ ip1 = (const struct ipv6hdr *) (skb1->data + off1);
+ ip2 = (const struct ipv6hdr *) (skb2->data + off2);
+
+ ip_proto = ip1->nexthdr;
+ if (ip_proto != ip2->nexthdr ||
+ ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
+ ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
+ return false;
+ off1 += 40;
+ off2 += 40;
+ break;
+ }
+
+ default: /* Maybe compare MAC header here? */
+ return false;
+ }
+
+ poff = proto_ports_offset(ip_proto);
+ if (poff < 0)
+ return true;
+
+ off1 += poff;
+ off2 += poff;
+
+ ports1 = (__force u32 *)(skb1->data + off1);
+ ports2 = (__force u32 *)(skb2->data + off2);
+ return *ports1 == *ports2;
+}
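The final comparison relies on transport-header layout; a note, not part of the patch:

	/* For TCP/UDP-style headers the 16-bit source and destination
	 * ports are the first four bytes at the transport offset, so a
	 * single u32 compare (*ports1 == *ports2) tests both at once.
	 */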
+
+static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
+{
+ *(unsigned int *)(qdisc_skb_cb(skb)->data) = classid;
+}
+
+static u16 choke_get_classid(const struct sk_buff *skb)
+{
+ return *(unsigned int *)(qdisc_skb_cb(skb)->data);
+}
+
+/*
+ * Classify flow using either:
+ * 1. pre-existing classification result in skb
+ * 2. fast internal classification
+ * 3. use TC filter based classification
+ */
+static bool choke_classify(struct sk_buff *skb,
+ struct Qdisc *sch, int *qerr)
+
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct tcf_result res;
+ int result;
+
+ result = tc_classify(skb, q->filter_list, &res);
+ if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ switch (result) {
+ case TC_ACT_STOLEN:
+ case TC_ACT_QUEUED:
+ *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+ case TC_ACT_SHOT:
+ return false;
+ }
+#endif
+ choke_set_classid(skb, TC_H_MIN(res.classid));
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Select a packet at random from queue
+ * HACK: since the queue can have holes from previous deletions, retry
+ * several times to find a random skb, then give up and return the head.
+ * Will return NULL if queue is empty (q->head == q->tail)
+ */
+static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
+ unsigned int *pidx)
+{
+ struct sk_buff *skb;
+ int retrys = 3;
+
+ do {
+ *pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
+ skb = q->tab[*pidx];
+ if (skb)
+ return skb;
+ } while (--retrys > 0);
+
+ return q->tab[*pidx = q->head];
+}
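On the retry bound above:

	/* Holes come only from earlier CHOKe drops and are reclaimed by the
	 * zap helpers, so they are normally sparse; three probes almost
	 * always land on a packet. The fallback to q->head never reads a
	 * hole, since head points at a live skb whenever the queue is
	 * non-empty.
	 */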
+
+/*
+ * Compare new packet with random packet in queue
+ * returns true if matched and sets *pidx
+ */
+static bool choke_match_random(const struct choke_sched_data *q,
+ struct sk_buff *nskb,
+ unsigned int *pidx)
+{
+ struct sk_buff *oskb;
+
+ if (q->head == q->tail)
+ return false;
+
+ oskb = choke_peek_random(q, pidx);
+ if (q->filter_list)
+ return choke_get_classid(nskb) == choke_get_classid(oskb);
+
+ return choke_match_flow(oskb, nskb);
+}
+
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct red_parms *p = &q->parms;
+ int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+
+ if (q->filter_list) {
+ /* If using external classifiers, get result and record it. */
+ if (!choke_classify(skb, sch, &ret))
+ goto other_drop; /* Packet was eaten by filter */
+ }
+
+ /* Compute average queue usage (see RED) */
+ p->qavg = red_calc_qavg(p, sch->q.qlen);
+ if (red_is_idling(p))
+ red_end_of_idle_period(p);
+
+ /* Is queue small? */
+ if (p->qavg <= p->qth_min)
+ p->qcount = -1;
+ else {
+ unsigned int idx;
+
+ /* Draw a packet at random from queue and compare flow */
+ if (choke_match_random(q, skb, &idx)) {
+ q->stats.matched++;
+ choke_drop_by_idx(sch, idx);
+ goto congestion_drop;
+ }
+
+ /* Queue is large, always mark/drop */
+ if (p->qavg > p->qth_max) {
+ p->qcount = -1;
+
+ sch->qstats.overlimits++;
+ if (use_harddrop(q) || !use_ecn(q) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.forced_mark++;
+ } else if (++p->qcount) {
+ if (red_mark_probability(p, p->qavg)) {
+ p->qcount = 0;
+ p->qR = red_random(p);
+
+ sch->qstats.overlimits++;
+ if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ }
+ } else
+ p->qR = red_random(p);
+ }
+
+ /* Admit new packet */
+ if (sch->q.qlen < q->limit) {
+ q->tab[q->tail] = skb;
+ q->tail = (q->tail + 1) & q->tab_mask;
+ ++sch->q.qlen;
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ q->stats.pdrop++;
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+
+ congestion_drop:
+ qdisc_drop(skb, sch);
+ return NET_XMIT_CN;
+
+ other_drop:
+ if (ret & __NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
+}
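Apart from the flow-match drop, the enqueue path above is classic RED; a summary of the branches:

	/* qavg <= qth_min           : admit (qcount reset)
	 * qth_min < qavg <= qth_max : probabilistic mark/drop (prob_* stats)
	 * qavg > qth_max            : forced mark/drop (forced_* stats)
	 * ECN marking replaces the drop only if TC_RED_ECN is set and the
	 * packet is ECN-capable; TC_RED_HARDDROP drops even then.
	 */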
+
+static struct sk_buff *choke_dequeue(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+
+ if (q->head == q->tail) {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ return NULL;
+ }
+
+ skb = q->tab[q->head];
+ q->tab[q->head] = NULL;
+ choke_zap_head_holes(q);
+ --sch->q.qlen;
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
+
+ return skb;
+}
+
+static unsigned int choke_drop(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ unsigned int len;
+
+ len = qdisc_queue_drop(sch);
+ if (len > 0)
+ q->stats.other++;
+ else {
+ if (!red_is_idling(&q->parms))
+ red_start_of_idle_period(&q->parms);
+ }
+
+ return len;
+}
+
+static void choke_reset(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ red_restart(&q->parms);
+}
+
+static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
+ [TCA_CHOKE_PARMS] = { .len = sizeof(struct tc_red_qopt) },
+ [TCA_CHOKE_STAB] = { .len = RED_STAB_SIZE },
+};
+
+
+static void choke_free(void *addr)
+{
+ if (addr) {
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+ }
+}
+
+static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CHOKE_MAX + 1];
+ const struct tc_red_qopt *ctl;
+ int err;
+ struct sk_buff **old = NULL;
+ unsigned int mask;
+
+ if (opt == NULL)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[TCA_CHOKE_PARMS] == NULL ||
+ tb[TCA_CHOKE_STAB] == NULL)
+ return -EINVAL;
+
+ ctl = nla_data(tb[TCA_CHOKE_PARMS]);
+
+ if (ctl->limit > CHOKE_MAX_QUEUE)
+ return -EINVAL;
+
+ mask = roundup_pow_of_two(ctl->limit + 1) - 1;
+ if (mask != q->tab_mask) {
+ struct sk_buff **ntab;
+
+ ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
+ if (!ntab)
+ ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
+ if (!ntab)
+ return -ENOMEM;
+
+ sch_tree_lock(sch);
+ old = q->tab;
+ if (old) {
+ unsigned int oqlen = sch->q.qlen, tail = 0;
+
+ while (q->head != q->tail) {
+ struct sk_buff *skb = q->tab[q->head];
+
+ q->head = (q->head + 1) & q->tab_mask;
+ if (!skb)
+ continue;
+ if (tail < mask) {
+ ntab[tail++] = skb;
+ continue;
+ }
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ --sch->q.qlen;
+ qdisc_drop(skb, sch);
+ }
+ qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+ q->head = 0;
+ q->tail = tail;
+ }
+
+ q->tab_mask = mask;
+ q->tab = ntab;
+ } else
+ sch_tree_lock(sch);
+
+ q->flags = ctl->flags;
+ q->limit = ctl->limit;
+
+ red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+ ctl->Plog, ctl->Scell_log,
+ nla_data(tb[TCA_CHOKE_STAB]));
+
+ if (q->head == q->tail)
+ red_end_of_idle_period(&q->parms);
+
+ sch_tree_unlock(sch);
+ choke_free(old);
+ return 0;
+}
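The resize path above in brief:

	/* A new power-of-two table (roundup_pow_of_two(limit + 1) slots) is
	 * allocated, live skbs are copied over in order with holes squeezed
	 * out, anything beyond the new capacity is dropped and accounted,
	 * and head/tail are rebased. kcalloc with a vzalloc fallback covers
	 * large limits that exceed what kmalloc can satisfy.
	 */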
+
+static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ return choke_change(sch, opt);
+}
+
+static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts = NULL;
+ struct tc_red_qopt opt = {
+ .limit = q->limit,
+ .flags = q->flags,
+ .qth_min = q->parms.qth_min >> q->parms.Wlog,
+ .qth_max = q->parms.qth_max >> q->parms.Wlog,
+ .Wlog = q->parms.Wlog,
+ .Plog = q->parms.Plog,
+ .Scell_log = q->parms.Scell_log,
+ };
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+ return nla_nest_end(skb, opts);
+
+nla_put_failure:
+ nla_nest_cancel(skb, opts);
+ return -EMSGSIZE;
+}
+
+static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+ struct tc_choke_xstats st = {
+ .early = q->stats.prob_drop + q->stats.forced_drop,
+ .marked = q->stats.prob_mark + q->stats.forced_mark,
+ .pdrop = q->stats.pdrop,
+ .other = q->stats.other,
+ .matched = q->stats.matched,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void choke_destroy(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ tcf_destroy_chain(&q->filter_list);
+ choke_free(q->tab);
+}
+
+static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
+static unsigned long choke_get(struct Qdisc *sch, u32 classid)
+{
+ return 0;
+}
+
+static void choke_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
+ u32 classid)
+{
+ return 0;
+}
+
+static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ if (cl)
+ return NULL;
+ return &q->filter_list;
+}
+
+static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ if (!arg->stop) {
+ if (arg->fn(sch, 1, arg) < 0) {
+ arg->stop = 1;
+ return;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops choke_class_ops = {
+ .leaf = choke_leaf,
+ .get = choke_get,
+ .put = choke_put,
+ .tcf_chain = choke_find_tcf,
+ .bind_tcf = choke_bind,
+ .unbind_tcf = choke_put,
+ .dump = choke_dump_class,
+ .walk = choke_walk,
+};
+
+static struct sk_buff *choke_peek_head(struct Qdisc *sch)
+{
+ struct choke_sched_data *q = qdisc_priv(sch);
+
+ return (q->head != q->tail) ? q->tab[q->head] : NULL;
+}
+
+static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
+ .id = "choke",
+ .priv_size = sizeof(struct choke_sched_data),
+
+ .enqueue = choke_enqueue,
+ .dequeue = choke_dequeue,
+ .peek = choke_peek_head,
+ .drop = choke_drop,
+ .init = choke_init,
+ .destroy = choke_destroy,
+ .reset = choke_reset,
+ .change = choke_change,
+ .dump = choke_dump,
+ .dump_stats = choke_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init choke_module_init(void)
+{
+ return register_qdisc(&choke_qdisc_ops);
+}
+
+static void __exit choke_module_exit(void)
+{
+ unregister_qdisc(&choke_qdisc_ops);
+}
+
+module_init(choke_module_init)
+module_exit(choke_module_exit)
+
+MODULE_LICENSE("GPL");
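For orientation, the userspace contract of the new qdisc mirrors RED's, as choke_policy above enforces; only the qdisc id string "choke" differs:

	/* TCA_OPTIONS nest:
	 *   TCA_CHOKE_PARMS - struct tc_red_qopt (limit, qth_min, qth_max,
	 *                     Wlog, Plog, Scell_log, flags)
	 *   TCA_CHOKE_STAB  - RED_STAB_SIZE-byte idle-decay lookup table
	 */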
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 0f7bf3f..2c79020 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -137,10 +137,10 @@ static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
if (tb[TCA_DSMARK_VALUE])
- p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
+ p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
if (tb[TCA_DSMARK_MASK])
- p->mask[*arg-1] = mask;
+ p->mask[*arg - 1] = mask;
err = 0;
@@ -155,8 +155,8 @@ static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
if (!dsmark_valid_index(p, arg))
return -EINVAL;
- p->mask[arg-1] = 0xff;
- p->value[arg-1] = 0;
+ p->mask[arg - 1] = 0xff;
+ p->value[arg - 1] = 0;
return 0;
}
@@ -175,7 +175,7 @@ static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
if (p->mask[i] == 0xff && !p->value[i])
goto ignore;
if (walker->count >= walker->skip) {
- if (walker->fn(sch, i+1, walker) < 0) {
+ if (walker->fn(sch, i + 1, walker) < 0) {
walker->stop = 1;
break;
}
@@ -304,9 +304,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
* and don't need yet another qdisc as a bypass.
*/
if (p->mask[index] != 0xff || p->value[index])
- printk(KERN_WARNING
- "dsmark_dequeue: unsupported protocol %d\n",
- ntohs(skb->protocol));
+ pr_warning("dsmark_dequeue: unsupported protocol %d\n",
+ ntohs(skb->protocol));
break;
}
@@ -424,14 +423,14 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
if (!dsmark_valid_index(p, cl))
return -EINVAL;
- tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1);
+ tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
tcm->tcm_info = p->q->handle;
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
- NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]);
- NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]);
+ NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
+ NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
return nla_nest_end(skb, opts);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index d468b47..be33f9d 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -19,12 +19,11 @@
/* 1 band FIFO pseudo-"scheduler" */
-struct fifo_sched_data
-{
+struct fifo_sched_data {
u32 limit;
};
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
@@ -34,7 +33,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
}
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
@@ -44,7 +43,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
}
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct fifo_sched_data *q = qdisc_priv(sch);
@@ -62,11 +61,13 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
{
struct fifo_sched_data *q = qdisc_priv(sch);
+ bool bypass;
+ bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
if (opt == NULL) {
u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
- if (sch->ops == &bfifo_qdisc_ops)
+ if (is_bfifo)
limit *= psched_mtu(qdisc_dev(sch));
q->limit = limit;
@@ -79,6 +80,15 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
q->limit = ctl->limit;
}
+ if (is_bfifo)
+ bypass = q->limit >= psched_mtu(qdisc_dev(sch));
+ else
+ bypass = q->limit >= 1;
+
+ if (bypass)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
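The new fifo_init() logic sets TCQ_F_CAN_BYPASS only when the limit guarantees the first packet cannot be rejected: at least one packet for pfifo, at least one full MTU of bytes for bfifo. With the flag set, the core transmit path may skip enqueue/dequeue entirely for an empty, uncontended qdisc; a simplified sketch of that gating test (assumed core semantics, not code from this patch):

	if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
	    qdisc_run_begin(q)) {
		/* hand skb straight to the driver, then qdisc_run_end(q) */
	}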
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598..0da09d5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -87,8 +87,8 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
*/
kfree_skb(skb);
if (net_ratelimit())
- printk(KERN_WARNING "Dead loop on netdevice %s, "
- "fix it urgently!\n", dev_queue->dev->name);
+ pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
+ dev_queue->dev->name);
ret = qdisc_qlen(q);
} else {
/*
@@ -137,8 +137,8 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
} else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
- printk(KERN_WARNING "BUG %s code %d qlen %d\n",
- dev->name, ret, q->q.qlen);
+ pr_warning("BUG %s code %d qlen %d\n",
+ dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q);
}
@@ -412,8 +412,9 @@ static struct Qdisc noqueue_qdisc = {
};
-static const u8 prio2band[TC_PRIO_MAX+1] =
- { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
+};
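How to read the table: the index is the TC_PRIO_* value, the entry is the band, and band 0 is served first:

	/* Examples: TC_PRIO_BESTEFFORT (0) -> band 1, TC_PRIO_BULK (2) ->
	 * band 2, TC_PRIO_INTERACTIVE (6) and TC_PRIO_CONTROL (7) -> band 0.
	 */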
/* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination.
@@ -445,7 +446,7 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
return priv->q + band;
}
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -460,7 +461,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
return qdisc_drop(skb, qdisc);
}
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
@@ -479,7 +480,7 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
return NULL;
}
-static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
@@ -493,7 +494,7 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
return NULL;
}
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc *qdisc)
{
int prio;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -510,7 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
- memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+ memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
return skb->len;
@@ -526,6 +527,8 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
skb_queue_head_init(band2list(priv, prio));
+ /* Can by-pass the queue discipline */
+ qdisc->flags |= TCQ_F_CAN_BYPASS;
return 0;
}
@@ -540,6 +543,7 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.dump = pfifo_fast_dump,
.owner = THIS_MODULE,
};
+EXPORT_SYMBOL(pfifo_fast_ops);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
struct Qdisc_ops *ops)
@@ -630,7 +634,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
#ifdef CONFIG_NET_SCHED
qdisc_list_del(qdisc);
- qdisc_put_stab(qdisc->stab);
+ qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
if (ops->reset)
@@ -674,25 +678,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
return oqdisc;
}
+EXPORT_SYMBOL(dev_graft_qdisc);
static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
{
- struct Qdisc *qdisc;
+ struct Qdisc *qdisc = &noqueue_qdisc;
if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev_queue,
&pfifo_fast_ops, TC_H_ROOT);
if (!qdisc) {
- printk(KERN_INFO "%s: activation failed\n", dev->name);
+ netdev_info(dev, "activation failed\n");
return;
}
-
- /* Can by-pass the queue discipline for default qdisc */
- qdisc->flags |= TCQ_F_CAN_BYPASS;
- } else {
- qdisc = &noqueue_qdisc;
}
dev_queue->qdisc_sleeping = qdisc;
}
@@ -761,6 +761,7 @@ void dev_activate(struct net_device *dev)
dev_watchdog_up(dev);
}
}
+EXPORT_SYMBOL(dev_activate);
static void dev_deactivate_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
@@ -840,6 +841,7 @@ void dev_deactivate(struct net_device *dev)
list_add(&dev->unreg_list, &single);
dev_deactivate_many(&single);
}
+EXPORT_SYMBOL(dev_deactivate);
static void dev_init_scheduler_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
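
The pfifo_fast cleanups above keep the classic three-band mapping: skb->priority is masked with TC_PRIO_MAX and used to index prio2band, selecting band 0, 1 or 2 (band 0 drains first). A minimal userspace sketch of that lookup, with the table copied from the patch and an illustrative test harness:

#include <stdio.h>

#define TC_PRIO_MAX 15

/* Same mapping as the kernel's pfifo_fast: band 0 is dequeued first. */
static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

int main(void)
{
        unsigned int prio;

        for (prio = 0; prio <= TC_PRIO_MAX; prio++)
                printf("priority %2u -> band %u\n",
                       prio, prio2band[prio & TC_PRIO_MAX]);
        return 0;
}
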
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 51dcc2a..b9493a0 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -32,8 +32,7 @@
struct gred_sched_data;
struct gred_sched;
-struct gred_sched_data
-{
+struct gred_sched_data {
u32 limit; /* HARD maximal queue length */
u32 DP; /* the drop parameters */
u32 bytesin; /* bytes seen on virtualQ so far*/
@@ -50,8 +49,7 @@ enum {
GRED_RIO_MODE,
};
-struct gred_sched
-{
+struct gred_sched {
struct gred_sched_data *tab[MAX_DPs];
unsigned long flags;
u32 red_flags;
@@ -150,17 +148,18 @@ static inline int gred_use_harddrop(struct gred_sched *t)
return t->red_flags & TC_RED_HARDDROP;
}
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct gred_sched_data *q=NULL;
- struct gred_sched *t= qdisc_priv(sch);
+ struct gred_sched_data *q = NULL;
+ struct gred_sched *t = qdisc_priv(sch);
unsigned long qavg = 0;
u16 dp = tc_index_to_dp(skb);
- if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
+ if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
dp = t->def;
- if ((q = t->tab[dp]) == NULL) {
+ q = t->tab[dp];
+ if (!q) {
/* Pass through packets not assigned to a DP
* if no default DP has been configured. This
* allows for DP flows to be left untouched.
@@ -183,7 +182,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
for (i = 0; i < t->DPs; i++) {
if (t->tab[i] && t->tab[i]->prio < q->prio &&
!red_is_idling(&t->tab[i]->parms))
- qavg +=t->tab[i]->parms.qavg;
+ qavg += t->tab[i]->parms.qavg;
}
}
@@ -203,28 +202,28 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
gred_store_wred_set(t, q);
switch (red_action(&q->parms, q->parms.qavg + qavg)) {
- case RED_DONT_MARK:
- break;
-
- case RED_PROB_MARK:
- sch->qstats.overlimits++;
- if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
- q->stats.prob_drop++;
- goto congestion_drop;
- }
-
- q->stats.prob_mark++;
- break;
-
- case RED_HARD_MARK:
- sch->qstats.overlimits++;
- if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
- !INET_ECN_set_ce(skb)) {
- q->stats.forced_drop++;
- goto congestion_drop;
- }
- q->stats.forced_mark++;
- break;
+ case RED_DONT_MARK:
+ break;
+
+ case RED_PROB_MARK:
+ sch->qstats.overlimits++;
+ if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ break;
+
+ case RED_HARD_MARK:
+ sch->qstats.overlimits++;
+ if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+ q->stats.forced_mark++;
+ break;
}
if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
@@ -241,7 +240,7 @@ congestion_drop:
return NET_XMIT_CN;
}
-static struct sk_buff *gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
@@ -254,9 +253,9 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit())
- printk(KERN_WARNING "GRED: Unable to relocate "
- "VQ 0x%x after dequeue, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ pr_warning("GRED: Unable to relocate VQ 0x%x "
+ "after dequeue, screwing up "
+ "backlog.\n", tc_index_to_dp(skb));
} else {
q->backlog -= qdisc_pkt_len(skb);
@@ -273,7 +272,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
return NULL;
}
-static unsigned int gred_drop(struct Qdisc* sch)
+static unsigned int gred_drop(struct Qdisc *sch)
{
struct sk_buff *skb;
struct gred_sched *t = qdisc_priv(sch);
@@ -286,9 +285,9 @@ static unsigned int gred_drop(struct Qdisc* sch)
if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
if (net_ratelimit())
- printk(KERN_WARNING "GRED: Unable to relocate "
- "VQ 0x%x while dropping, screwing up "
- "backlog.\n", tc_index_to_dp(skb));
+ pr_warning("GRED: Unable to relocate VQ 0x%x "
+ "while dropping, screwing up "
+ "backlog.\n", tc_index_to_dp(skb));
} else {
q->backlog -= len;
q->stats.other++;
@@ -308,7 +307,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
}
-static void gred_reset(struct Qdisc* sch)
+static void gred_reset(struct Qdisc *sch)
{
int i;
struct gred_sched *t = qdisc_priv(sch);
@@ -369,8 +368,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
for (i = table->DPs; i < MAX_DPs; i++) {
if (table->tab[i]) {
- printk(KERN_WARNING "GRED: Warning: Destroying "
- "shadowed VQ 0x%x\n", i);
+ pr_warning("GRED: Warning: Destroying "
+ "shadowed VQ 0x%x\n", i);
gred_destroy_vq(table->tab[i]);
table->tab[i] = NULL;
}
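
The re-indented GRED switch above is the standard RED decision: below the minimum threshold a packet passes, between the thresholds it is marked or dropped with rising probability, and above the maximum threshold it is hard-marked or dropped. A simplified sketch of that three-way decision; the linear probability and thresholds here are illustrative, while the kernel's red_action() works on fixed-point state in struct red_parms:

#include <stdio.h>
#include <stdlib.h>

enum red_result { RED_DONT_MARK, RED_PROB_MARK, RED_HARD_MARK };

/* Simplified RED: compare the average queue length with two thresholds. */
static enum red_result red_action(double qavg, double min_th, double max_th,
                                  double max_p)
{
        if (qavg < min_th)
                return RED_DONT_MARK;
        if (qavg >= max_th)
                return RED_HARD_MARK;
        /* Linear marking probability between the thresholds. */
        if (drand48() < max_p * (qavg - min_th) / (max_th - min_th))
                return RED_PROB_MARK;
        return RED_DONT_MARK;
}

int main(void)
{
        double q;

        for (q = 0.0; q <= 40.0; q += 10.0)
                printf("qavg %5.1f -> action %d\n",
                       q, red_action(q, 10.0, 30.0, 0.1));
        return 0;
}
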
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 14a799de..6488e64 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -81,8 +81,7 @@
* that are expensive on 32-bit architectures.
*/
-struct internal_sc
-{
+struct internal_sc {
u64 sm1; /* scaled slope of the 1st segment */
u64 ism1; /* scaled inverse-slope of the 1st segment */
u64 dx; /* the x-projection of the 1st segment */
@@ -92,8 +91,7 @@ struct internal_sc
};
/* runtime service curve */
-struct runtime_sc
-{
+struct runtime_sc {
u64 x; /* current starting position on x-axis */
u64 y; /* current starting position on y-axis */
u64 sm1; /* scaled slope of the 1st segment */
@@ -104,15 +102,13 @@ struct runtime_sc
u64 ism2; /* scaled inverse-slope of the 2nd segment */
};
-enum hfsc_class_flags
-{
+enum hfsc_class_flags {
HFSC_RSC = 0x1,
HFSC_FSC = 0x2,
HFSC_USC = 0x4
};
-struct hfsc_class
-{
+struct hfsc_class {
struct Qdisc_class_common cl_common;
unsigned int refcnt; /* usage count */
@@ -140,8 +136,8 @@ struct hfsc_class
u64 cl_cumul; /* cumulative work in bytes done by
real-time criteria */
- u64 cl_d; /* deadline*/
- u64 cl_e; /* eligible time */
+ u64 cl_d; /* deadline*/
+ u64 cl_e; /* eligible time */
u64 cl_vt; /* virtual time */
u64 cl_f; /* time when this class will fit for
link-sharing, max(myf, cfmin) */
@@ -176,8 +172,7 @@ struct hfsc_class
unsigned long cl_nactive; /* number of active children */
};
-struct hfsc_sched
-{
+struct hfsc_sched {
u16 defcls; /* default class id */
struct hfsc_class root; /* root class */
struct Qdisc_class_hash clhash; /* class hash */
@@ -693,7 +688,7 @@ init_vf(struct hfsc_class *cl, unsigned int len)
if (go_active) {
n = rb_last(&cl->cl_parent->vt_tree);
if (n != NULL) {
- max_cl = rb_entry(n, struct hfsc_class,vt_node);
+ max_cl = rb_entry(n, struct hfsc_class, vt_node);
/*
* set vt to the average of the min and max
* classes. if the parent's period didn't
@@ -1177,8 +1172,10 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
return NULL;
}
#endif
- if ((cl = (struct hfsc_class *)res.class) == NULL) {
- if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
+ cl = (struct hfsc_class *)res.class;
+ if (!cl) {
+ cl = hfsc_find_class(res.classid, sch);
+ if (!cl)
break; /* filter selected invalid classid */
if (cl->level >= head->level)
break; /* filter may only point downwards */
@@ -1316,7 +1313,7 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
return -1;
}
-static inline int
+static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
if ((cl->cl_flags & HFSC_RSC) &&
@@ -1420,7 +1417,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
struct hfsc_class *cl;
u64 next_time = 0;
- if ((cl = eltree_get_minel(q)) != NULL)
+ cl = eltree_get_minel(q);
+ if (cl)
next_time = cl->cl_e;
if (q->root.cl_cfmin != 0) {
if (next_time == 0 || next_time > q->root.cl_cfmin)
@@ -1625,7 +1623,8 @@ hfsc_dequeue(struct Qdisc *sch)
* find the class with the minimum deadline among
* the eligible classes.
*/
- if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
+ cl = eltree_get_mindl(q, cur_time);
+ if (cl) {
realtime = 1;
} else {
/*
@@ -1664,7 +1663,7 @@ hfsc_dequeue(struct Qdisc *sch)
set_passive(cl);
}
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
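
qdisc_unthrottled() replaces the open-coded flag clear here, and the same substitution appears in sch_htb.c, sch_netem.c and sch_tbf.c below. A sketch of the accessor trio, assuming helpers of this shape (the flag value shown is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TCQ_F_THROTTLED 0x00000010      /* illustrative flag value */

struct qdisc { unsigned int flags; };

static inline bool qdisc_is_throttled(const struct qdisc *sch)
{
        return sch->flags & TCQ_F_THROTTLED;
}

static inline void qdisc_throttled(struct qdisc *sch)
{
        sch->flags |= TCQ_F_THROTTLED;
}

static inline void qdisc_unthrottled(struct qdisc *sch)
{
        sch->flags &= ~TCQ_F_THROTTLED;
}

int main(void)
{
        struct qdisc q = { 0 };

        qdisc_throttled(&q);
        printf("throttled: %d\n", qdisc_is_throttled(&q));
        qdisc_unthrottled(&q);
        printf("throttled: %d\n", qdisc_is_throttled(&q));
        return 0;
}
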
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index fc12fe6..e1429a8 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -99,9 +99,10 @@ struct htb_class {
struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
/* When class changes from state 1->2 and disconnects from
- parent's feed then we lost ptr value and start from the
- first child again. Here we store classid of the
- last valid ptr (used when ptr is NULL). */
+ * parent's feed then we lost ptr value and start from the
+ * first child again. Here we store classid of the
+ * last valid ptr (used when ptr is NULL).
+ */
u32 last_ptr_id[TC_HTB_NUMPRIO];
} inner;
} un;
@@ -185,7 +186,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
* have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
* then finish and return direct queue.
*/
-#define HTB_DIRECT (struct htb_class*)-1
+#define HTB_DIRECT ((struct htb_class *)-1L)
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr)
@@ -197,11 +198,13 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
int result;
/* allow to select class by setting skb->priority to valid classid;
- note that nfmark can be used too by attaching filter fw with no
- rules in it */
+ * note that nfmark can be used too by attaching filter fw with no
+ * rules in it
+ */
if (skb->priority == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) selected */
- if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
+ cl = htb_find(skb->priority, sch);
+ if (cl && cl->level == 0)
return cl;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -216,10 +219,12 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
return NULL;
}
#endif
- if ((cl = (void *)res.class) == NULL) {
+ cl = (void *)res.class;
+ if (!cl) {
if (res.classid == sch->handle)
return HTB_DIRECT; /* X:0 (direct flow) */
- if ((cl = htb_find(res.classid, sch)) == NULL)
+ cl = htb_find(res.classid, sch);
+ if (!cl)
break; /* filter selected invalid classid */
}
if (!cl->level)
@@ -378,7 +383,8 @@ static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
if (p->un.inner.feed[prio].rb_node)
/* parent already has its feed in use so that
- reset bit in mask as parent is already ok */
+ * reset bit in mask as parent is already ok
+ */
mask &= ~(1 << prio);
htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
@@ -413,8 +419,9 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
if (p->un.inner.ptr[prio] == cl->node + prio) {
/* we are removing child which is pointed to from
- parent feed - forget the pointer but remember
- classid */
+ * parent feed - forget the pointer but remember
+ * classid
+ */
p->un.inner.last_ptr_id[prio] = cl->common.classid;
p->un.inner.ptr[prio] = NULL;
}
@@ -663,8 +670,9 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
unsigned long start)
{
/* don't run for longer than 2 jiffies; 2 is used instead of
- 1 to simplify things when jiffy is going to be incremented
- too soon */
+ * 1 to simplify things when jiffy is going to be incremented
+ * too soon
+ */
unsigned long stop_at = start + 2;
while (time_before(jiffies, stop_at)) {
struct htb_class *cl;
@@ -687,7 +695,7 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
/* too much load - let's continue after a break for scheduling */
if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
- printk(KERN_WARNING "htb: too many events!\n");
+ pr_warning("htb: too many events!\n");
q->warned |= HTB_WARN_TOOMANYEVENTS;
}
@@ -695,7 +703,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level,
}
/* Returns class->node+prio from id-tree where class's id is >= id. NULL
- is no such one exists. */
+ * if no such one exists.
+ */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
u32 id)
{
@@ -739,12 +748,14 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
for (i = 0; i < 65535; i++) {
if (!*sp->pptr && *sp->pid) {
/* ptr was invalidated but id is valid - try to recover
- the original or next ptr */
+ * the original or next ptr
+ */
*sp->pptr =
htb_id_find_next_upper(prio, sp->root, *sp->pid);
}
*sp->pid = 0; /* ptr is valid now so that remove this hint as it
- can become out of date quickly */
+ * can become out of date quickly
+ */
if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
while ((*sp->pptr)->rb_left)
@@ -772,7 +783,8 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
}
/* dequeues packet at given priority and level; call only if
- you are sure that there is active class at prio/level */
+ * you are sure that there is active class at prio/level
+ */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
int level)
{
@@ -789,9 +801,10 @@ next:
return NULL;
/* class can be empty - it is unlikely but can be true if leaf
- qdisc drops packets in enqueue routine or if someone used
- graft operation on the leaf since last dequeue;
- simply deactivate and skip such class */
+ * qdisc drops packets in enqueue routine or if someone used
+ * graft operation on the leaf since last dequeue;
+ * simply deactivate and skip such class
+ */
if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
struct htb_class *next;
htb_deactivate(q, cl);
@@ -831,7 +844,8 @@ next:
ptr[0]) + prio);
}
/* this used to be after charge_class but this constellation
- gives us slightly better performance */
+ * gives us slightly better performance
+ */
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
htb_charge_class(q, cl, level, skb);
@@ -852,7 +866,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
if (skb != NULL) {
ok:
qdisc_bstats_update(sch, skb);
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
sch->q.qlen--;
return skb;
}
@@ -883,6 +897,7 @@ ok:
m = ~q->row_mask[level];
while (m != (int)(-1)) {
int prio = ffz(m);
+
m |= 1 << prio;
skb = htb_dequeue_tree(q, prio, level);
if (likely(skb != NULL))
@@ -987,13 +1002,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
return err;
if (tb[TCA_HTB_INIT] == NULL) {
- printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
+ pr_err("HTB: hey probably you have bad tc tool ?\n");
return -EINVAL;
}
gopt = nla_data(tb[TCA_HTB_INIT]);
if (gopt->version != HTB_VER >> 16) {
- printk(KERN_ERR
- "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+ pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
return -EINVAL;
}
@@ -1206,9 +1220,10 @@ static void htb_destroy(struct Qdisc *sch)
cancel_work_sync(&q->work);
qdisc_watchdog_cancel(&q->watchdog);
/* This line used to be after htb_destroy_class call below
- and surprisingly it worked in 2.4. But it must precede it
- because filter need its target class alive to be able to call
- unbind_filter on it (without Oops). */
+ * and surprisingly it worked in 2.4. But it must precede it
+ * because filter need its target class alive to be able to call
+ * unbind_filter on it (without Oops).
+ */
tcf_destroy_chain(&q->filter_list);
for (i = 0; i < q->clhash.hashsize; i++) {
@@ -1342,11 +1357,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* check maximal depth */
if (parent && parent->parent && parent->parent->level < 2) {
- printk(KERN_ERR "htb: tree is too deep\n");
+ pr_err("htb: tree is too deep\n");
goto failure;
}
err = -ENOBUFS;
- if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
+ cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
goto failure;
err = gen_new_estimator(&cl->bstats, &cl->rate_est,
@@ -1366,8 +1382,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
RB_CLEAR_NODE(&cl->node[prio]);
/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
- so that can't be used inside of sch_tree_lock
- -- thanks to Karlis Peisenieks */
+ * so that can't be used inside of sch_tree_lock
+ * -- thanks to Karlis Peisenieks
+ */
new_q = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, classid);
sch_tree_lock(sch);
@@ -1419,17 +1436,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
}
/* it used to be a nasty bug here, we have to check that node
- is really leaf before changing cl->un.leaf ! */
+ * is really leaf before changing cl->un.leaf !
+ */
if (!cl->level) {
cl->quantum = rtab->rate.rate / q->rate2quantum;
if (!hopt->quantum && cl->quantum < 1000) {
- printk(KERN_WARNING
+ pr_warning(
"HTB: quantum of class %X is small. Consider r2q change.\n",
cl->common.classid);
cl->quantum = 1000;
}
if (!hopt->quantum && cl->quantum > 200000) {
- printk(KERN_WARNING
+ pr_warning(
"HTB: quantum of class %X is big. Consider r2q change.\n",
cl->common.classid);
cl->quantum = 200000;
@@ -1478,13 +1496,13 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
struct htb_class *cl = htb_find(classid, sch);
/*if (cl && !cl->level) return 0;
- The line above used to be there to prevent attaching filters to
- leaves. But at least tc_index filter uses this just to get class
- for other reasons so that we have to allow for it.
- ----
- 19.6.2002 As Werner explained it is ok - bind filter is just
- another way to "lock" the class - unlike "get" this lock can
- be broken by class during destroy IIUC.
+ * The line above used to be there to prevent attaching filters to
+ * leaves. But at least tc_index filter uses this just to get class
+ * for other reasons so that we have to allow for it.
+ * ----
+ * 19.6.2002 As Werner explained it is ok - bind filter is just
+ * another way to "lock" the class - unlike "get" this lock can
+ * be broken by class during destroy IIUC.
*/
if (cl)
cl->filter_cnt++;
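
The HTB_DIRECT change above wraps the sentinel in parentheses and makes the constant pointer-width (-1L). The idiom reserves an address distinct from both NULL and any real class, so htb_classify() can signal "use the direct queue" without an extra out-parameter. A small sketch of the pattern:

#include <stdio.h>

struct htb_class { int level; };

/* Sentinel distinct from NULL and from any real object address. */
#define HTB_DIRECT ((struct htb_class *)-1L)

static void handle(struct htb_class *cl)
{
        if (cl == HTB_DIRECT)
                printf("enqueue to the direct queue\n");
        else if (!cl)
                printf("no class: drop\n");
        else
                printf("enqueue to class at level %d\n", cl->level);
}

int main(void)
{
        struct htb_class leaf = { .level = 0 };

        handle(HTB_DIRECT);
        handle(NULL);
        handle(&leaf);
        return 0;
}
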
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index ecc302f..ec5cbc8 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -61,7 +61,6 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
TC_H_MIN(ntx + 1)));
if (qdisc == NULL)
goto err;
- qdisc->flags |= TCQ_F_CAN_BYPASS;
priv->qdiscs[ntx] = qdisc;
}
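
The sch_mq.c hunk above drops the per-child TCQ_F_CAN_BYPASS because pfifo_fast now sets the flag in its own init (see the sch_generic.c hunk earlier). The flag lets the transmit path skip enqueue/dequeue entirely when a qdisc is empty; a hedged sketch of that fast path, loosely modelled on the core transmit logic with illustrative names and flag value:

#include <stdbool.h>
#include <stdio.h>

#define TCQ_F_CAN_BYPASS 0x00000001     /* illustrative flag value */

struct qdisc {
        unsigned int flags;
        unsigned int qlen;
};

static bool try_bypass(struct qdisc *q)
{
        /* Empty queue + bypass-capable qdisc: transmit directly,
         * skipping the enqueue/dequeue round trip. */
        if ((q->flags & TCQ_F_CAN_BYPASS) && q->qlen == 0) {
                printf("bypass: send skb straight to the device\n");
                return true;
        }
        printf("enqueue: qdisc must schedule the skb\n");
        return false;
}

int main(void)
{
        struct qdisc q = { .flags = TCQ_F_CAN_BYPASS, .qlen = 0 };

        try_bypass(&q);         /* empty: bypasses */
        q.qlen = 3;
        try_bypass(&q);         /* backlog present: must enqueue */
        return 0;
}
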
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
new file mode 100644
index 0000000..ace37f9
--- /dev/null
+++ b/net/sched/sch_mqprio.c
@@ -0,0 +1,416 @@
+/*
+ * net/sched/sch_mqprio.c
+ *
+ * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/sch_generic.h>
+
+struct mqprio_sched {
+ struct Qdisc **qdiscs;
+ int hw_owned;
+};
+
+static void mqprio_destroy(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ unsigned int ntx;
+
+ if (priv->qdiscs) {
+ for (ntx = 0;
+ ntx < dev->num_tx_queues && priv->qdiscs[ntx];
+ ntx++)
+ qdisc_destroy(priv->qdiscs[ntx]);
+ kfree(priv->qdiscs);
+ }
+
+ if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
+ dev->netdev_ops->ndo_setup_tc(dev, 0);
+ else
+ netdev_set_num_tc(dev, 0);
+}
+
+static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
+{
+ int i, j;
+
+ /* Verify num_tc is not out of max range */
+ if (qopt->num_tc > TC_MAX_QUEUE)
+ return -EINVAL;
+
+ /* Verify priority mapping uses valid tcs */
+ for (i = 0; i < TC_BITMASK + 1; i++) {
+ if (qopt->prio_tc_map[i] >= qopt->num_tc)
+ return -EINVAL;
+ }
+
+ /* net_device does not support requested operation */
+ if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
+ return -EINVAL;
+
+ /* if hw owned qcount and qoffset are taken from LLD so
+ * no reason to verify them here
+ */
+ if (qopt->hw)
+ return 0;
+
+ for (i = 0; i < qopt->num_tc; i++) {
+ unsigned int last = qopt->offset[i] + qopt->count[i];
+
+ /* Verify the queue count is within the tx range; a count
+ * reaching real_num_tx_queues means the last queue is in use.
+ */
+ if (qopt->offset[i] >= dev->real_num_tx_queues ||
+ !qopt->count[i] ||
+ last > dev->real_num_tx_queues)
+ return -EINVAL;
+
+ /* Verify that the offset and counts do not overlap */
+ for (j = i + 1; j < qopt->num_tc; j++) {
+ if (last > qopt->offset[j])
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct netdev_queue *dev_queue;
+ struct Qdisc *qdisc;
+ int i, err = -EOPNOTSUPP;
+ struct tc_mqprio_qopt *qopt = NULL;
+
+ BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
+ BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
+
+ if (sch->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ if (!netif_is_multiqueue(dev))
+ return -EOPNOTSUPP;
+
+ if (nla_len(opt) < sizeof(*qopt))
+ return -EINVAL;
+
+ qopt = nla_data(opt);
+ if (mqprio_parse_opt(dev, qopt))
+ return -EINVAL;
+
+ /* pre-allocate qdisc, attachment can't fail */
+ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+ GFP_KERNEL);
+ if (priv->qdiscs == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ dev_queue = netdev_get_tx_queue(dev, i);
+ qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+ TC_H_MAKE(TC_H_MAJ(sch->handle),
+ TC_H_MIN(i + 1)));
+ if (qdisc == NULL) {
+ err = -ENOMEM;
+ goto err;
+ }
+ priv->qdiscs[i] = qdisc;
+ }
+
+ /* If the mqprio options indicate that hardware should own
+ * the queue mapping then run ndo_setup_tc otherwise use the
+ * supplied and verified mapping
+ */
+ if (qopt->hw) {
+ priv->hw_owned = 1;
+ err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
+ if (err)
+ goto err;
+ } else {
+ netdev_set_num_tc(dev, qopt->num_tc);
+ for (i = 0; i < qopt->num_tc; i++)
+ netdev_set_tc_queue(dev, i,
+ qopt->count[i], qopt->offset[i]);
+ }
+
+ /* Always use supplied priority mappings */
+ for (i = 0; i < TC_BITMASK + 1; i++)
+ netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
+
+ sch->flags |= TCQ_F_MQROOT;
+ return 0;
+
+err:
+ mqprio_destroy(sch);
+ return err;
+}
+
+static void mqprio_attach(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ struct Qdisc *qdisc;
+ unsigned int ntx;
+
+ /* Attach underlying qdisc */
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ qdisc = priv->qdiscs[ntx];
+ qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (qdisc)
+ qdisc_destroy(qdisc);
+ }
+ kfree(priv->qdiscs);
+ priv->qdiscs = NULL;
+}
+
+static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
+ unsigned long cl)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);
+
+ if (ntx >= dev->num_tx_queues)
+ return NULL;
+ return netdev_get_tx_queue(dev, ntx);
+}
+
+static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+ struct Qdisc **old)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+ if (!dev_queue)
+ return -EINVAL;
+
+ if (dev->flags & IFF_UP)
+ dev_deactivate(dev);
+
+ *old = dev_graft_qdisc(dev_queue, new);
+
+ if (dev->flags & IFF_UP)
+ dev_activate(dev);
+
+ return 0;
+}
+
+static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mqprio_sched *priv = qdisc_priv(sch);
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tc_mqprio_qopt opt = { 0 };
+ struct Qdisc *qdisc;
+ unsigned int i;
+
+ sch->q.qlen = 0;
+ memset(&sch->bstats, 0, sizeof(sch->bstats));
+ memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ spin_lock_bh(qdisc_lock(qdisc));
+ sch->q.qlen += qdisc->q.qlen;
+ sch->bstats.bytes += qdisc->bstats.bytes;
+ sch->bstats.packets += qdisc->bstats.packets;
+ sch->qstats.qlen += qdisc->qstats.qlen;
+ sch->qstats.backlog += qdisc->qstats.backlog;
+ sch->qstats.drops += qdisc->qstats.drops;
+ sch->qstats.requeues += qdisc->qstats.requeues;
+ sch->qstats.overlimits += qdisc->qstats.overlimits;
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+
+ opt.num_tc = netdev_get_num_tc(dev);
+ memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
+ opt.hw = priv->hw_owned;
+
+ for (i = 0; i < netdev_get_num_tc(dev); i++) {
+ opt.count[i] = dev->tc_to_txq[i].count;
+ opt.offset[i] = dev->tc_to_txq[i].offset;
+ }
+
+ NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+ return skb->len;
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
+{
+ struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+ if (!dev_queue)
+ return NULL;
+
+ return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned int ntx = TC_H_MIN(classid);
+
+ if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
+ return 0;
+ return ntx;
+}
+
+static void mqprio_put(struct Qdisc *sch, unsigned long cl)
+{
+}
+
+static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (cl <= netdev_get_num_tc(dev)) {
+ tcm->tcm_parent = TC_H_ROOT;
+ tcm->tcm_info = 0;
+ } else {
+ int i;
+ struct netdev_queue *dev_queue;
+
+ dev_queue = mqprio_queue_get(sch, cl);
+ tcm->tcm_parent = 0;
+ for (i = 0; i < netdev_get_num_tc(dev); i++) {
+ struct netdev_tc_txq tc = dev->tc_to_txq[i];
+ int q_idx = cl - netdev_get_num_tc(dev);
+
+ if (q_idx > tc.offset &&
+ q_idx <= tc.offset + tc.count) {
+ tcm->tcm_parent =
+ TC_H_MAKE(TC_H_MAJ(sch->handle),
+ TC_H_MIN(i + 1));
+ break;
+ }
+ }
+ tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+ }
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ return 0;
+}
+
+static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+{
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (cl <= netdev_get_num_tc(dev)) {
+ int i;
+ struct Qdisc *qdisc;
+ struct gnet_stats_queue qstats = {0};
+ struct gnet_stats_basic_packed bstats = {0};
+ struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];
+
+ /* Drop the lock here; it will be reclaimed before touching
+ * statistics. This is required because the d->lock we
+ * hold here is the lock on dev_queue->qdisc_sleeping,
+ * also acquired below.
+ */
+ spin_unlock_bh(d->lock);
+
+ for (i = tc.offset; i < tc.offset + tc.count; i++) {
+ qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ spin_lock_bh(qdisc_lock(qdisc));
+ bstats.bytes += qdisc->bstats.bytes;
+ bstats.packets += qdisc->bstats.packets;
+ qstats.qlen += qdisc->qstats.qlen;
+ qstats.backlog += qdisc->qstats.backlog;
+ qstats.drops += qdisc->qstats.drops;
+ qstats.requeues += qdisc->qstats.requeues;
+ qstats.overlimits += qdisc->qstats.overlimits;
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+ /* Reclaim root sleeping lock before completing stats */
+ spin_lock_bh(d->lock);
+ if (gnet_stats_copy_basic(d, &bstats) < 0 ||
+ gnet_stats_copy_queue(d, &qstats) < 0)
+ return -1;
+ } else {
+ struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+ sch = dev_queue->qdisc_sleeping;
+ sch->qstats.qlen = sch->q.qlen;
+ if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
+ gnet_stats_copy_queue(d, &sch->qstats) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned long ntx;
+
+ if (arg->stop)
+ return;
+
+ /* Walk hierarchy with a virtual class per tc */
+ arg->count = arg->skip;
+ for (ntx = arg->skip;
+ ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
+ ntx++) {
+ if (arg->fn(sch, ntx + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops mqprio_class_ops = {
+ .graft = mqprio_graft,
+ .leaf = mqprio_leaf,
+ .get = mqprio_get,
+ .put = mqprio_put,
+ .walk = mqprio_walk,
+ .dump = mqprio_dump_class,
+ .dump_stats = mqprio_dump_class_stats,
+};
+
+struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
+ .cl_ops = &mqprio_class_ops,
+ .id = "mqprio",
+ .priv_size = sizeof(struct mqprio_sched),
+ .init = mqprio_init,
+ .destroy = mqprio_destroy,
+ .attach = mqprio_attach,
+ .dump = mqprio_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init mqprio_module_init(void)
+{
+ return register_qdisc(&mqprio_qdisc_ops);
+}
+
+static void __exit mqprio_module_exit(void)
+{
+ unregister_qdisc(&mqprio_qdisc_ops);
+}
+
+module_init(mqprio_module_init);
+module_exit(mqprio_module_exit);
+
+MODULE_LICENSE("GPL");
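
mqprio_parse_opt() in the new file above rejects maps where a traffic class's (offset, count) window falls outside the device's real queue range or overlaps a later class. The same checks in a standalone sketch (struct and queue counts are illustrative):

#include <stdio.h>

struct tc_map { unsigned int offset, count; };

/* Returns 0 if every class window fits in [0, nqueues) and none overlap,
 * -1 otherwise -- the same checks mqprio_parse_opt() performs. */
static int validate_tc_map(const struct tc_map *tc, int num_tc,
                           unsigned int nqueues)
{
        for (int i = 0; i < num_tc; i++) {
                unsigned int last = tc[i].offset + tc[i].count;

                if (tc[i].offset >= nqueues || !tc[i].count || last > nqueues)
                        return -1;
                /* Verify that the offset and counts do not overlap. */
                for (int j = i + 1; j < num_tc; j++)
                        if (last > tc[j].offset)
                                return -1;
        }
        return 0;
}

int main(void)
{
        struct tc_map ok[]  = { { 0, 2 }, { 2, 2 }, { 4, 4 } };
        struct tc_map bad[] = { { 0, 4 }, { 2, 2 } };   /* overlaps */

        printf("ok map:  %d\n", validate_tc_map(ok, 3, 8));
        printf("bad map: %d\n", validate_tc_map(bad, 2, 8));
        return 0;
}
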
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 436a2e7..edc1950 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -156,7 +156,7 @@ static unsigned int multiq_drop(struct Qdisc *sch)
unsigned int len;
struct Qdisc *qdisc;
- for (band = q->bands-1; band >= 0; band--) {
+ for (band = q->bands - 1; band >= 0; band--) {
qdisc = q->queues[band];
if (qdisc->ops->drop) {
len = qdisc->ops->drop(qdisc);
@@ -265,7 +265,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < q->max_bands; i++)
q->queues[i] = &noop_qdisc;
- err = multiq_tune(sch,opt);
+ err = multiq_tune(sch, opt);
if (err)
kfree(q->queues);
@@ -346,7 +346,7 @@ static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
struct multiq_sched_data *q = qdisc_priv(sch);
tcm->tcm_handle |= TC_H_MIN(cl);
- tcm->tcm_info = q->queues[cl-1]->handle;
+ tcm->tcm_info = q->queues[cl - 1]->handle;
return 0;
}
@@ -378,7 +378,7 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++;
continue;
}
- if (arg->fn(sch, band+1, arg) < 0) {
+ if (arg->fn(sch, band + 1, arg) < 0) {
arg->stop = 1;
break;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 6a3006b..64f0d32 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -211,8 +211,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
cb = netem_skb_cb(skb);
- if (q->gap == 0 || /* not doing reordering */
- q->counter < q->gap || /* inside last reordering gap */
+ if (q->gap == 0 || /* not doing reordering */
+ q->counter < q->gap || /* inside last reordering gap */
q->reorder < get_crandom(&q->reorder_cor)) {
psched_time_t now;
psched_tdiff_t delay;
@@ -248,7 +248,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
-static unsigned int netem_drop(struct Qdisc* sch)
+static unsigned int netem_drop(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
unsigned int len = 0;
@@ -265,7 +265,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
struct netem_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- if (sch->flags & TCQ_F_THROTTLED)
+ if (qdisc_is_throttled(sch))
return NULL;
skb = q->qdisc->ops->peek(q->qdisc);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index fbd710d..2a318f2 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -22,8 +22,7 @@
#include <net/pkt_sched.h>
-struct prio_sched_data
-{
+struct prio_sched_data {
int bands;
struct tcf_proto *filter_list;
u8 prio2band[TC_PRIO_MAX+1];
@@ -54,7 +53,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
if (!q->filter_list || err < 0) {
if (TC_H_MAJ(band))
band = 0;
- return q->queues[q->prio2band[band&TC_PRIO_MAX]];
+ return q->queues[q->prio2band[band & TC_PRIO_MAX]];
}
band = res.classid;
}
@@ -106,7 +105,7 @@ static struct sk_buff *prio_peek(struct Qdisc *sch)
return NULL;
}
-static struct sk_buff *prio_dequeue(struct Qdisc* sch)
+static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
@@ -124,7 +123,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc* sch)
}
-static unsigned int prio_drop(struct Qdisc* sch)
+static unsigned int prio_drop(struct Qdisc *sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
@@ -143,24 +142,24 @@ static unsigned int prio_drop(struct Qdisc* sch)
static void
-prio_reset(struct Qdisc* sch)
+prio_reset(struct Qdisc *sch)
{
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
- for (prio=0; prio<q->bands; prio++)
+ for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
sch->q.qlen = 0;
}
static void
-prio_destroy(struct Qdisc* sch)
+prio_destroy(struct Qdisc *sch)
{
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
tcf_destroy_chain(&q->filter_list);
- for (prio=0; prio<q->bands; prio++)
+ for (prio = 0; prio < q->bands; prio++)
qdisc_destroy(q->queues[prio]);
}
@@ -177,7 +176,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
return -EINVAL;
- for (i=0; i<=TC_PRIO_MAX; i++) {
+ for (i = 0; i <= TC_PRIO_MAX; i++) {
if (qopt->priomap[i] >= qopt->bands)
return -EINVAL;
}
@@ -186,7 +185,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
- for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
+ for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
struct Qdisc *child = q->queues[i];
q->queues[i] = &noop_qdisc;
if (child != &noop_qdisc) {
@@ -196,9 +195,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
}
sch_tree_unlock(sch);
- for (i=0; i<q->bands; i++) {
+ for (i = 0; i < q->bands; i++) {
if (q->queues[i] == &noop_qdisc) {
struct Qdisc *child, *old;
+
child = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops,
TC_H_MAKE(sch->handle, i + 1));
@@ -224,7 +224,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
struct prio_sched_data *q = qdisc_priv(sch);
int i;
- for (i=0; i<TCQ_PRIO_BANDS; i++)
+ for (i = 0; i < TCQ_PRIO_BANDS; i++)
q->queues[i] = &noop_qdisc;
if (opt == NULL) {
@@ -232,7 +232,7 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt)
} else {
int err;
- if ((err= prio_tune(sch, opt)) != 0)
+ if ((err = prio_tune(sch, opt)) != 0)
return err;
}
return 0;
@@ -245,7 +245,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_prio_qopt opt;
opt.bands = q->bands;
- memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
+ memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
@@ -342,7 +342,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
arg->count++;
continue;
}
- if (arg->fn(sch, prio+1, arg) < 0) {
+ if (arg->fn(sch, prio + 1, arg) < 0) {
arg->stop = 1;
break;
}
@@ -350,7 +350,7 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
-static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
{
struct prio_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 9f98dbd..6649463 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -36,8 +36,7 @@
if RED works correctly.
*/
-struct red_sched_data
-{
+struct red_sched_data {
u32 limit; /* HARD maximal queue length */
unsigned char flags;
struct red_parms parms;
@@ -55,7 +54,7 @@ static inline int red_use_harddrop(struct red_sched_data *q)
return q->flags & TC_RED_HARDDROP;
}
-static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
@@ -67,29 +66,29 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
red_end_of_idle_period(&q->parms);
switch (red_action(&q->parms, q->parms.qavg)) {
- case RED_DONT_MARK:
- break;
-
- case RED_PROB_MARK:
- sch->qstats.overlimits++;
- if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
- q->stats.prob_drop++;
- goto congestion_drop;
- }
-
- q->stats.prob_mark++;
- break;
-
- case RED_HARD_MARK:
- sch->qstats.overlimits++;
- if (red_use_harddrop(q) || !red_use_ecn(q) ||
- !INET_ECN_set_ce(skb)) {
- q->stats.forced_drop++;
- goto congestion_drop;
- }
-
- q->stats.forced_mark++;
- break;
+ case RED_DONT_MARK:
+ break;
+
+ case RED_PROB_MARK:
+ sch->qstats.overlimits++;
+ if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+ q->stats.prob_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.prob_mark++;
+ break;
+
+ case RED_HARD_MARK:
+ sch->qstats.overlimits++;
+ if (red_use_harddrop(q) || !red_use_ecn(q) ||
+ !INET_ECN_set_ce(skb)) {
+ q->stats.forced_drop++;
+ goto congestion_drop;
+ }
+
+ q->stats.forced_mark++;
+ break;
}
ret = qdisc_enqueue(skb, child);
@@ -106,7 +105,7 @@ congestion_drop:
return NET_XMIT_CN;
}
-static struct sk_buff * red_dequeue(struct Qdisc* sch)
+static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct red_sched_data *q = qdisc_priv(sch);
@@ -123,7 +122,7 @@ static struct sk_buff * red_dequeue(struct Qdisc* sch)
return skb;
}
-static struct sk_buff * red_peek(struct Qdisc* sch)
+static struct sk_buff *red_peek(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
@@ -131,7 +130,7 @@ static struct sk_buff * red_peek(struct Qdisc* sch)
return child->ops->peek(child);
}
-static unsigned int red_drop(struct Qdisc* sch)
+static unsigned int red_drop(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
@@ -150,7 +149,7 @@ static unsigned int red_drop(struct Qdisc* sch)
return 0;
}
-static void red_reset(struct Qdisc* sch)
+static void red_reset(struct Qdisc *sch)
{
struct red_sched_data *q = qdisc_priv(sch);
@@ -217,7 +216,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
return 0;
}
-static int red_init(struct Qdisc* sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
struct red_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index edea8ce..c2e628d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -76,7 +77,8 @@
#define SFQ_DEPTH 128 /* max number of packets per flow */
#define SFQ_SLOTS 128 /* max number of flows */
#define SFQ_EMPTY_SLOT 255
-#define SFQ_HASH_DIVISOR 1024
+#define SFQ_DEFAULT_HASH_DIVISOR 1024
+
/* We use 16 bits to store allot, and want to handle packets up to 64K
* Scale allot by 8 (1<<3) so that no overflow occurs.
*/
@@ -92,8 +94,7 @@ typedef unsigned char sfq_index;
* while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
* are 'pointers' to dep[] array
*/
-struct sfq_head
-{
+struct sfq_head {
sfq_index next;
sfq_index prev;
};
@@ -108,13 +109,12 @@ struct sfq_slot {
short allot; /* credit for this slot */
};
-struct sfq_sched_data
-{
+struct sfq_sched_data {
/* Parameters */
int perturb_period;
- unsigned quantum; /* Allotment per round: MUST BE >= MTU */
+ unsigned int quantum; /* Allotment per round: MUST BE >= MTU */
int limit;
-
+ unsigned int divisor; /* number of slots in hash table */
/* Variables */
struct tcf_proto *filter_list;
struct timer_list perturb_timer;
@@ -122,7 +122,7 @@ struct sfq_sched_data
sfq_index cur_depth; /* depth of longest slot */
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct sfq_slot *tail; /* current slot in round */
- sfq_index ht[SFQ_HASH_DIVISOR]; /* Hash table */
+ sfq_index *ht; /* Hash table (divisor slots) */
struct sfq_slot slots[SFQ_SLOTS];
struct sfq_head dep[SFQ_DEPTH]; /* Linked list of slots, indexed by depth */
};
@@ -137,12 +137,12 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
return &q->dep[val - SFQ_SLOTS];
}
-static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
- return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
+ return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
}
-static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
+static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
u32 h, h2;
@@ -157,13 +157,13 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
iph = ip_hdr(skb);
h = (__force u32)iph->daddr;
h2 = (__force u32)iph->saddr ^ iph->protocol;
- if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ if (iph->frag_off & htons(IP_MF | IP_OFFSET))
break;
poff = proto_ports_offset(iph->protocol);
if (poff >= 0 &&
pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
iph = ip_hdr(skb);
- h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+ h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
}
break;
}
@@ -181,7 +181,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
if (poff >= 0 &&
pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
iph = ipv6_hdr(skb);
- h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+ h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
}
break;
}
@@ -203,7 +203,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
if (TC_H_MAJ(skb->priority) == sch->handle &&
TC_H_MIN(skb->priority) > 0 &&
- TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
+ TC_H_MIN(skb->priority) <= q->divisor)
return TC_H_MIN(skb->priority);
if (!q->filter_list)
@@ -221,7 +221,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
return 0;
}
#endif
- if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
+ if (TC_H_MIN(res.classid) <= q->divisor)
return TC_H_MIN(res.classid);
}
return 0;
@@ -491,13 +491,18 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
return -EINVAL;
+ if (ctl->divisor &&
+ (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
+ return -EINVAL;
+
sch_tree_lock(sch);
q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = ctl->perturb_period * HZ;
if (ctl->limit)
q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
-
+ if (ctl->divisor)
+ q->divisor = ctl->divisor;
qlen = sch->q.qlen;
while (sch->q.qlen > q->limit)
sfq_drop(sch);
@@ -515,15 +520,13 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
struct sfq_sched_data *q = qdisc_priv(sch);
+ size_t sz;
int i;
q->perturb_timer.function = sfq_perturbation;
q->perturb_timer.data = (unsigned long)sch;
init_timer_deferrable(&q->perturb_timer);
- for (i = 0; i < SFQ_HASH_DIVISOR; i++)
- q->ht[i] = SFQ_EMPTY_SLOT;
-
for (i = 0; i < SFQ_DEPTH; i++) {
q->dep[i].next = i + SFQ_SLOTS;
q->dep[i].prev = i + SFQ_SLOTS;
@@ -532,6 +535,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
q->limit = SFQ_DEPTH - 1;
q->cur_depth = 0;
q->tail = NULL;
+ q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
if (opt == NULL) {
q->quantum = psched_mtu(qdisc_dev(sch));
q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
@@ -543,10 +547,23 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
return err;
}
+ sz = sizeof(q->ht[0]) * q->divisor;
+ q->ht = kmalloc(sz, GFP_KERNEL);
+ if (!q->ht && sz > PAGE_SIZE)
+ q->ht = vmalloc(sz);
+ if (!q->ht)
+ return -ENOMEM;
+ for (i = 0; i < q->divisor; i++)
+ q->ht[i] = SFQ_EMPTY_SLOT;
+
for (i = 0; i < SFQ_SLOTS; i++) {
slot_queue_init(&q->slots[i]);
sfq_link(q, i);
}
+ if (q->limit >= 1)
+ sch->flags |= TCQ_F_CAN_BYPASS;
+ else
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
@@ -557,6 +574,10 @@ static void sfq_destroy(struct Qdisc *sch)
tcf_destroy_chain(&q->filter_list);
q->perturb_period = 0;
del_timer_sync(&q->perturb_timer);
+ if (is_vmalloc_addr(q->ht))
+ vfree(q->ht);
+ else
+ kfree(q->ht);
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -569,7 +590,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.perturb_period = q->perturb_period / HZ;
opt.limit = q->limit;
- opt.divisor = SFQ_HASH_DIVISOR;
+ opt.divisor = q->divisor;
opt.flows = q->limit;
NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
@@ -594,6 +615,8 @@ static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
+ /* we cannot bypass queue discipline anymore */
+ sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
}
@@ -647,7 +670,7 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
if (arg->stop)
return;
- for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
+ for (i = 0; i < q->divisor; i++) {
if (q->ht[i] == SFQ_EMPTY_SLOT ||
arg->count < arg->skip) {
arg->count++;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index e931658..1dcfb52 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -97,8 +97,7 @@
changed the limit is not effective anymore.
*/
-struct tbf_sched_data
-{
+struct tbf_sched_data {
/* Parameters */
u32 limit; /* Maximal length of backlog: bytes */
u32 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
@@ -115,10 +114,10 @@ struct tbf_sched_data
struct qdisc_watchdog watchdog; /* Watchdog timer */
};
-#define L2T(q,L) qdisc_l2t((q)->R_tab,L)
-#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
+#define L2T(q, L) qdisc_l2t((q)->R_tab, L)
+#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
int ret;
@@ -137,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return NET_XMIT_SUCCESS;
}
-static unsigned int tbf_drop(struct Qdisc* sch)
+static unsigned int tbf_drop(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
unsigned int len = 0;
@@ -149,7 +148,7 @@ static unsigned int tbf_drop(struct Qdisc* sch)
return len;
}
-static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
+static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
@@ -185,7 +184,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
q->tokens = toks;
q->ptokens = ptoks;
sch->q.qlen--;
- sch->flags &= ~TCQ_F_THROTTLED;
+ qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
return skb;
}
@@ -209,7 +208,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
return NULL;
}
-static void tbf_reset(struct Qdisc* sch)
+static void tbf_reset(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -227,7 +226,7 @@ static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
[TCA_TBF_PTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};
-static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
int err;
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -236,7 +235,7 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
struct qdisc_rate_table *rtab = NULL;
struct qdisc_rate_table *ptab = NULL;
struct Qdisc *child = NULL;
- int max_size,n;
+ int max_size, n;
err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
if (err < 0)
@@ -259,15 +258,18 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
}
for (n = 0; n < 256; n++)
- if (rtab->data[n] > qopt->buffer) break;
- max_size = (n << qopt->rate.cell_log)-1;
+ if (rtab->data[n] > qopt->buffer)
+ break;
+ max_size = (n << qopt->rate.cell_log) - 1;
if (ptab) {
int size;
for (n = 0; n < 256; n++)
- if (ptab->data[n] > qopt->mtu) break;
- size = (n << qopt->peakrate.cell_log)-1;
- if (size < max_size) max_size = size;
+ if (ptab->data[n] > qopt->mtu)
+ break;
+ size = (n << qopt->peakrate.cell_log) - 1;
+ if (size < max_size)
+ max_size = size;
}
if (max_size < 0)
goto done;
@@ -310,7 +312,7 @@ done:
return err;
}
-static int tbf_init(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -422,8 +424,7 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static const struct Qdisc_class_ops tbf_class_ops =
-{
+static const struct Qdisc_class_ops tbf_class_ops = {
.graft = tbf_graft,
.leaf = tbf_leaf,
.get = tbf_get,
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index d84e732..45cd300 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -53,8 +53,7 @@
which will not break load balancing, though native slave
traffic will have the highest priority. */
-struct teql_master
-{
+struct teql_master {
struct Qdisc_ops qops;
struct net_device *dev;
struct Qdisc *slaves;
@@ -65,22 +64,21 @@ struct teql_master
unsigned long tx_dropped;
};
-struct teql_sched_data
-{
+struct teql_sched_data {
struct Qdisc *next;
struct teql_master *m;
struct neighbour *ncache;
struct sk_buff_head q;
};
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
-#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
+#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
/* "teql*" qdisc routines */
static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct net_device *dev = qdisc_dev(sch);
struct teql_sched_data *q = qdisc_priv(sch);
@@ -96,7 +94,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
}
static struct sk_buff *
-teql_dequeue(struct Qdisc* sch)
+teql_dequeue(struct Qdisc *sch)
{
struct teql_sched_data *dat = qdisc_priv(sch);
struct netdev_queue *dat_queue;
@@ -118,13 +116,13 @@ teql_dequeue(struct Qdisc* sch)
}
static struct sk_buff *
-teql_peek(struct Qdisc* sch)
+teql_peek(struct Qdisc *sch)
{
/* teql is meant to be used as root qdisc */
return NULL;
}
-static __inline__ void
+static inline void
teql_neigh_release(struct neighbour *n)
{
if (n)
@@ -132,7 +130,7 @@ teql_neigh_release(struct neighbour *n)
}
static void
-teql_reset(struct Qdisc* sch)
+teql_reset(struct Qdisc *sch)
{
struct teql_sched_data *dat = qdisc_priv(sch);
@@ -142,13 +140,14 @@ teql_reset(struct Qdisc* sch)
}
static void
-teql_destroy(struct Qdisc* sch)
+teql_destroy(struct Qdisc *sch)
{
struct Qdisc *q, *prev;
struct teql_sched_data *dat = qdisc_priv(sch);
struct teql_master *master = dat->m;
- if ((prev = master->slaves) != NULL) {
+ prev = master->slaves;
+ if (prev) {
do {
q = NEXT_SLAVE(prev);
if (q == sch) {
@@ -180,7 +179,7 @@ teql_destroy(struct Qdisc* sch)
static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{
struct net_device *dev = qdisc_dev(sch);
- struct teql_master *m = (struct teql_master*)sch->ops;
+ struct teql_master *m = (struct teql_master *)sch->ops;
struct teql_sched_data *q = qdisc_priv(sch);
if (dev->hard_header_len > m->dev->hard_header_len)
@@ -291,7 +290,8 @@ restart:
nores = 0;
busy = 0;
- if ((q = start) == NULL)
+ q = start;
+ if (!q)
goto drop;
do {
@@ -356,10 +356,10 @@ drop:
static int teql_master_open(struct net_device *dev)
{
- struct Qdisc * q;
+ struct Qdisc *q;
struct teql_master *m = netdev_priv(dev);
int mtu = 0xFFFE;
- unsigned flags = IFF_NOARP|IFF_MULTICAST;
+ unsigned int flags = IFF_NOARP | IFF_MULTICAST;
if (m->slaves == NULL)
return -EUNATCH;
@@ -427,7 +427,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
do {
if (new_mtu > qdisc_dev(q)->mtu)
return -EINVAL;
- } while ((q=NEXT_SLAVE(q)) != m->slaves);
+ } while ((q = NEXT_SLAVE(q)) != m->slaves);
}
dev->mtu = new_mtu;
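
teql keeps its slave qdiscs on a circular singly-linked list: NEXT_SLAVE() follows the per-qdisc next pointer, and walks stop when they come back around to master->slaves, as in teql_master_mtu() above. A minimal sketch of that circular walk (types are illustrative):

#include <stdio.h>

struct slave {
        int id;
        struct slave *next;     /* circular: last points back to first */
};

#define NEXT_SLAVE(q) ((q)->next)

static void walk_slaves(struct slave *head)
{
        struct slave *q = head;

        if (!q)
                return;
        do {
                printf("visit slave %d\n", q->id);
        } while ((q = NEXT_SLAVE(q)) != head);
}

int main(void)
{
        struct slave c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        c.next = &a;            /* close the ring */
        walk_slaves(&a);
        return 0;
}
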
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dd419d2..d8d98d5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1475,6 +1475,12 @@ restart:
goto out_free;
}
+ if (sk_filter(other, skb) < 0) {
+ /* Toss the packet but do not return any error to the sender */
+ err = len;
+ goto out_free;
+ }
+
unix_state_lock(other);
err = -EPERM;
if (!unix_may_send(sk, other))
@@ -1978,36 +1984,38 @@ static int unix_shutdown(struct socket *sock, int mode)
mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
- if (mode) {
- unix_state_lock(sk);
- sk->sk_shutdown |= mode;
- other = unix_peer(sk);
- if (other)
- sock_hold(other);
- unix_state_unlock(sk);
- sk->sk_state_change(sk);
-
- if (other &&
- (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
-
- int peer_mode = 0;
-
- if (mode&RCV_SHUTDOWN)
- peer_mode |= SEND_SHUTDOWN;
- if (mode&SEND_SHUTDOWN)
- peer_mode |= RCV_SHUTDOWN;
- unix_state_lock(other);
- other->sk_shutdown |= peer_mode;
- unix_state_unlock(other);
- other->sk_state_change(other);
- if (peer_mode == SHUTDOWN_MASK)
- sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
- else if (peer_mode & RCV_SHUTDOWN)
- sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
- }
- if (other)
- sock_put(other);
+ if (!mode)
+ return 0;
+
+ unix_state_lock(sk);
+ sk->sk_shutdown |= mode;
+ other = unix_peer(sk);
+ if (other)
+ sock_hold(other);
+ unix_state_unlock(sk);
+ sk->sk_state_change(sk);
+
+ if (other &&
+ (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
+
+ int peer_mode = 0;
+
+ if (mode&RCV_SHUTDOWN)
+ peer_mode |= SEND_SHUTDOWN;
+ if (mode&SEND_SHUTDOWN)
+ peer_mode |= RCV_SHUTDOWN;
+ unix_state_lock(other);
+ other->sk_shutdown |= peer_mode;
+ unix_state_unlock(other);
+ other->sk_state_change(other);
+ if (peer_mode == SHUTDOWN_MASK)
+ sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
+ else if (peer_mode & RCV_SHUTDOWN)
+ sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
}
+ if (other)
+ sock_put(other);
+
return 0;
}
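
The sk_filter() hook added above tosses a filtered datagram but still reports the full length, so a sender cannot distinguish filtered from delivered packets; the unix_shutdown() rework below it is a pure early-return refactor. A sketch of the "drop silently, report success" convention (the filter and names are hypothetical):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Stand-in for sk_filter(): reject payloads starting with "BAD". */
static int filter_ok(const char *buf, size_t len)
{
        return !(len >= 3 && memcmp(buf, "BAD", 3) == 0);
}

/* Returns the number of bytes "sent". Filtered packets are tossed,
 * but the caller still sees success -- mirroring the af_unix change. */
static ssize_t send_msg(const char *buf, size_t len)
{
        if (!filter_ok(buf, len))
                return (ssize_t)len;    /* toss, but report full length */
        printf("delivered: %.*s\n", (int)len, buf);
        return (ssize_t)len;
}

int main(void)
{
        printf("sent %zd bytes\n", send_msg("hello", 5));
        printf("sent %zd bytes\n", send_msg("BADxx", 5));
        return 0;
}
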
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 74944a2..788a12c 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -59,8 +59,6 @@
#include <asm/uaccess.h> /* copy_to/from_user */
#include <linux/init.h> /* __initfunc et al. */
-#define KMEM_SAFETYZONE 8
-
#define DEV_TO_SLAVE(dev) (*((struct net_device **)netdev_priv(dev)))
/*
diff --git a/net/wireless/core.c b/net/wireless/core.c
index e9a5f8c..fe01de2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -718,13 +718,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
wdev->ps = false;
/* allow mac80211 to determine the timeout */
wdev->ps_timeout = -1;
- if (rdev->ops->set_power_mgmt)
- if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
- wdev->ps,
- wdev->ps_timeout)) {
- /* assume this means it's off */
- wdev->ps = false;
- }
if (!dev->ethtool_ops)
dev->ethtool_ops = &cfg80211_ethtool_ops;
@@ -813,6 +806,19 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
rdev->opencount++;
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
+
+ /*
+ * Configure power management to the driver here so that its
+ * correctly set also after interface type changes etc.
+ */
+ if (wdev->iftype == NL80211_IFTYPE_STATION &&
+ rdev->ops->set_power_mgmt)
+ if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
+ wdev->ps,
+ wdev->ps_timeout)) {
+ /* assume this means it's off */
+ wdev->ps = false;
+ }
break;
case NETDEV_UNREGISTER:
/*
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 9b62710..864ddfb 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2718,7 +2718,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_GET_MESH_CONFIG);
if (!hdr)
- goto nla_put_failure;
+ goto out;
pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
if (!pinfoattr)
goto nla_put_failure;
@@ -2759,6 +2759,7 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
nla_put_failure:
genlmsg_cancel(msg, hdr);
+ out:
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -2954,7 +2955,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
NL80211_CMD_GET_REG);
if (!hdr)
- goto nla_put_failure;
+ goto put_failure;
NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
cfg80211_regdomain->alpha2);
@@ -3001,6 +3002,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
nla_put_failure:
genlmsg_cancel(msg, hdr);
+put_failure:
nlmsg_free(msg);
err = -EMSGSIZE;
out:
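
Both nl80211 fixes retarget a goto taken before the message header exists; jumping to nla_put_failure there would hand a NULL hdr to genlmsg_cancel(). The underlying idiom is one cleanup label per allocation stage, unwound in reverse. A sketch with stand-in resources:

#include <stdio.h>
#include <stdlib.h>

/* Failures before the "header" exists must skip the cancel step --
 * exactly the mistake the two nl80211 hunks fix. */
static int build_message(int fail_at)
{
        char *msg, *hdr;

        msg = malloc(128);              /* nlmsg_new() stand-in */
        if (!msg)
                goto out;
        if (fail_at == 1)
                goto free_msg;          /* no header yet: no cancel */
        hdr = malloc(16);               /* nl80211hdr_put() stand-in */
        if (!hdr)
                goto free_msg;
        if (fail_at == 2)
                goto cancel_hdr;        /* header exists: cancel it first */
        /* success: the message would be sent here */
        free(hdr);
        free(msg);
        return 0;

cancel_hdr:
        free(hdr);                      /* genlmsg_cancel() stand-in */
free_msg:
        free(msg);                      /* nlmsg_free() stand-in */
out:
        return -1;
}

int main(void)
{
        int i;

        for (i = 0; i <= 2; i++)
                printf("fail_at=%d -> %d\n", i, build_message(i));
        return 0;
}
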
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 37693b6..c565689 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1801,9 +1801,9 @@ void regulatory_hint_disconnect(void)
static bool freq_is_chan_12_13_14(u16 freq)
{
- if (freq == ieee80211_channel_to_frequency(12) ||
- freq == ieee80211_channel_to_frequency(13) ||
- freq == ieee80211_channel_to_frequency(14))
+ if (freq == ieee80211_channel_to_frequency(12, IEEE80211_BAND_2GHZ) ||
+ freq == ieee80211_channel_to_frequency(13, IEEE80211_BAND_2GHZ) ||
+ freq == ieee80211_channel_to_frequency(14, IEEE80211_BAND_2GHZ))
return true;
return false;
}
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7620ae2..6a750bc 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -29,29 +29,37 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
}
EXPORT_SYMBOL(ieee80211_get_response_rate);
-int ieee80211_channel_to_frequency(int chan)
+int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
{
- if (chan < 14)
- return 2407 + chan * 5;
-
- if (chan == 14)
- return 2484;
-
- /* FIXME: 802.11j 17.3.8.3.2 */
- return (chan + 1000) * 5;
+ /* see 802.11 17.3.8.3.2 and Annex J
+ * there are overlapping channel numbers in 5GHz and 2GHz bands */
+ if (band == IEEE80211_BAND_5GHZ) {
+ if (chan >= 182 && chan <= 196)
+ return 4000 + chan * 5;
+ else
+ return 5000 + chan * 5;
+ } else { /* IEEE80211_BAND_2GHZ */
+ if (chan == 14)
+ return 2484;
+ else if (chan < 14)
+ return 2407 + chan * 5;
+ else
+ return 0; /* not supported */
+ }
}
EXPORT_SYMBOL(ieee80211_channel_to_frequency);
int ieee80211_frequency_to_channel(int freq)
{
+ /* see 802.11 17.3.8.3.2 and Annex J */
if (freq == 2484)
return 14;
-
- if (freq < 2484)
+ else if (freq < 2484)
return (freq - 2407) / 5;
-
- /* FIXME: 802.11j 17.3.8.3.2 */
- return freq/5 - 1000;
+ else if (freq >= 4910 && freq <= 4980)
+ return (freq - 4000) / 5;
+ else
+ return (freq - 5000) / 5;
}
EXPORT_SYMBOL(ieee80211_frequency_to_channel);
@@ -159,12 +167,15 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
/*
* Disallow pairwise keys with non-zero index unless it's WEP
- * (because current deployments use pairwise WEP keys with
- * non-zero indizes but 802.11i clearly specifies to use zero)
+ * or a vendor specific cipher (because current deployments use
+ * pairwise WEP keys with non-zero indices and for vendor specific
+ * ciphers this should be validated in the driver or hardware level
+ * - but 802.11i clearly specifies to use zero)
*/
if (pairwise && key_idx &&
- params->cipher != WLAN_CIPHER_SUITE_WEP40 &&
- params->cipher != WLAN_CIPHER_SUITE_WEP104)
+ ((params->cipher == WLAN_CIPHER_SUITE_TKIP) ||
+ (params->cipher == WLAN_CIPHER_SUITE_CCMP) ||
+ (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC)))
return -EINVAL;
switch (params->cipher) {
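
The rewritten channel/frequency helpers in util.c are pure arithmetic and easy to check outside the kernel. A userspace copy with spot checks (the band enum is reduced to the two values used):

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* Mirrors the patched ieee80211_channel_to_frequency(). */
static int chan_to_freq(int chan, enum band band)
{
        if (band == BAND_5GHZ) {
                if (chan >= 182 && chan <= 196)
                        return 4000 + chan * 5;
                return 5000 + chan * 5;
        }
        if (chan == 14)
                return 2484;
        if (chan < 14)
                return 2407 + chan * 5;
        return 0;       /* not supported */
}

/* Mirrors the patched ieee80211_frequency_to_channel(). */
static int freq_to_chan(int freq)
{
        if (freq == 2484)
                return 14;
        if (freq < 2484)
                return (freq - 2407) / 5;
        if (freq >= 4910 && freq <= 4980)
                return (freq - 4000) / 5;
        return (freq - 5000) / 5;
}

int main(void)
{
        int chans[] = { 1, 13, 14, 36, 182 };
        enum band bands[] = { BAND_2GHZ, BAND_2GHZ, BAND_2GHZ,
                              BAND_5GHZ, BAND_5GHZ };
        int i;

        for (i = 0; i < 5; i++) {
                int f = chan_to_freq(chans[i], bands[i]);

                printf("chan %3d -> %4d MHz -> chan %3d\n",
                       chans[i], f, freq_to_chan(f));
        }
        return 0;
}
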
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3e5dbd4..7f1f4ec 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -267,9 +267,12 @@ int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
* -EINVAL for impossible things.
*/
if (freq->e == 0) {
+ enum ieee80211_band band = IEEE80211_BAND_2GHZ;
if (freq->m < 0)
return 0;
- return ieee80211_channel_to_frequency(freq->m);
+ if (freq->m > 14)
+ band = IEEE80211_BAND_5GHZ;
+ return ieee80211_channel_to_frequency(freq->m, band);
} else {
int i, div = 1000000;
for (i = 0; i < freq->e; i++)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 6459588..7a8e2c7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1340,7 +1340,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
default:
BUG();
}
- xdst = dst_alloc(dst_ops);
+ xdst = dst_alloc(dst_ops, 0);
xfrm_policy_put_afinfo(afinfo);
if (likely(xdst))