Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_pad.c7
-rw-r--r--drivers/acpi/apei/apei-base.c17
-rw-r--r--drivers/acpi/apei/apei-internal.h9
-rw-r--r--drivers/acpi/apei/ghes.c6
-rw-r--r--drivers/acpi/battery.c10
-rw-r--r--drivers/acpi/bus.c88
-rw-r--r--drivers/acpi/power.c2
-rw-r--r--drivers/acpi/processor_idle.c32
-rw-r--r--drivers/acpi/processor_perflib.c30
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c49
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/video.c35
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/power/main.c6
-rw-r--r--drivers/base/regmap/regmap.c10
-rw-r--r--drivers/block/drbd/drbd_bitmap.c11
-rw-r--r--drivers/block/drbd/drbd_req.c66
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c166
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h5
-rw-r--r--drivers/block/umem.c40
-rw-r--r--drivers/block/xen-blkback/common.h2
-rw-r--r--drivers/block/xen-blkfront.c58
-rw-r--r--drivers/char/agp/intel-agp.c1
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/hw_random/atmel-rng.c9
-rw-r--r--drivers/clk/clk.c38
-rw-r--r--drivers/clk/mxs/clk-imx23.c12
-rw-r--r--drivers/clk/mxs/clk-imx28.c32
-rw-r--r--drivers/clk/spear/clk-aux-synth.c2
-rw-r--r--drivers/clk/spear/clk-frac-synth.c2
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c2
-rw-r--r--drivers/clk/spear/clk-vco-pll.c2
-rw-r--r--drivers/clk/spear/clk.c2
-rw-r--r--drivers/clk/spear/clk.h2
-rw-r--r--drivers/clk/spear/spear1310_clock.c2
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/spear/spear3xx_clock.c2
-rw-r--r--drivers/clk/spear/spear6xx_clock.c4
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/em_sti.c406
-rw-r--r--drivers/clocksource/sh_cmt.c26
-rw-r--r--drivers/clocksource/sh_mtu2.c6
-rw-r--r--drivers/clocksource/sh_tmu.c16
-rw-r--r--drivers/connector/connector.c25
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/dma/imx-sdma.c6
-rw-r--r--drivers/dma/pl330.c30
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/i7core_edac.c15
-rw-r--r--drivers/edac/mpc85xx_edac.c3
-rw-r--r--drivers/edac/sb_edac.c10
-rw-r--r--drivers/extcon/extcon-max8997.c5
-rw-r--r--drivers/extcon/extcon_class.c2
-rw-r--r--drivers/extcon/extcon_gpio.c2
-rw-r--r--drivers/gpio/gpio-samsung.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c12
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c37
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c39
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h43
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c37
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c60
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c23
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c49
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h1
-rw-r--r--drivers/gpu/drm/radeon/ni.c26
-rw-r--r--drivers/gpu/drm/radeon/r600.c16
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c42
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c8
-rw-r--r--drivers/gpu/drm/radeon/r600d.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c10
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c12
-rw-r--r--drivers/gpu/drm/radeon/rv770.c23
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h3
-rw-r--r--drivers/gpu/drm/radeon/si.c481
-rw-r--r--drivers/gpu/drm/radeon/si_reg.h72
-rw-r--r--drivers/gpu/drm/radeon/sid.h19
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c13
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c15
-rw-r--r--drivers/gpu/drm/udl/udl_main.c2
-rw-r--r--drivers/gpu/drm/via/via_map.c3
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c27
-rw-r--r--drivers/hid/Kconfig43
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-logitech-dj.c38
-rw-r--r--drivers/hid/hid-magicmouse.c6
-rw-r--r--drivers/hid/usbhid/Kconfig8
-rw-r--r--drivers/hwmon/applesmc.c6
-rw-r--r--drivers/hwmon/coretemp.c37
-rw-r--r--drivers/hwmon/emc2103.c12
-rw-r--r--drivers/hwmon/jc42.c2
-rw-r--r--drivers/hwmon/lineage-pem.c2
-rw-r--r--drivers/hwmon/ltc4261.c2
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/i2c/muxes/Kconfig12
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c279
-rw-r--r--drivers/ide/icside.c17
-rw-r--r--drivers/ide/ide-cs.c3
-rw-r--r--drivers/ieee802154/Kconfig6
-rw-r--r--drivers/ieee802154/Makefile1
-rw-r--r--drivers/ieee802154/at86rf230.c968
-rw-r--r--drivers/iio/Kconfig3
-rw-r--r--drivers/iio/industrialio-core.c16
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/core/netlink.c17
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c83
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h9
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c22
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c27
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c64
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c17
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c35
-rw-r--r--drivers/iommu/amd_iommu.c71
-rw-r--r--drivers/iommu/amd_iommu_init.c13
-rw-r--r--drivers/iommu/amd_iommu_types.h3
-rw-r--r--drivers/isdn/mISDN/stack.c4
-rw-r--r--drivers/leds/Kconfig4
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/led-core.c7
-rw-r--r--drivers/md/dm-thin.c7
-rw-r--r--drivers/md/md.c8
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/persistent-data/dm-space-map-checker.c54
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c11
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c11
-rw-r--r--drivers/md/raid1.c17
-rw-r--r--drivers/md/raid10.c30
-rw-r--r--drivers/md/raid5.c67
-rw-r--r--drivers/media/common/saa7146_fops.c5
-rw-r--r--drivers/media/dvb/frontends/cx24110.c4
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb/frontends/lg2160.c2
-rw-r--r--drivers/media/dvb/siano/smsusb.c2
-rw-r--r--drivers/media/radio/radio-maxiradio.c2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c84
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c5
-rw-r--r--drivers/media/video/bt8xx/bttv.h1
-rw-r--r--drivers/media/video/bt8xx/bttvp.h1
-rw-r--r--drivers/media/video/bw-qcam.c47
-rw-r--r--drivers/media/video/cx18/cx18-driver.c10
-rw-r--r--drivers/media/video/cx18/cx18-driver.h2
-rw-r--r--drivers/media/video/cx18/cx18-firmware.c9
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c15
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c1
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c2
-rw-r--r--drivers/media/video/gspca/gspca.c4
-rw-r--r--drivers/media/video/gspca/ov534.c32
-rw-r--r--drivers/media/video/gspca/ov534_9.c1
-rw-r--r--drivers/media/video/gspca/pac7311.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c24
-rw-r--r--drivers/media/video/gspca/sonixj.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c18
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h2
-rw-r--r--drivers/media/video/mem2mem_testdev.c50
-rw-r--r--drivers/media/video/mx2_camera.c52
-rw-r--r--drivers/media/video/pms.c1
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c2
-rw-r--r--drivers/media/video/s5p-mfc/regs-mfc.h5
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c4
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c12
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.h4
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_shm.h3
-rw-r--r--drivers/media/video/smiapp/Kconfig2
-rw-r--r--drivers/media/video/smiapp/smiapp-core.c1
-rw-r--r--drivers/media/video/tuner-core.c2
-rw-r--r--drivers/media/video/v4l2-dev.c4
-rw-r--r--drivers/media/video/v4l2-ioctl.c1
-rw-r--r--drivers/media/video/vino.c4
-rw-r--r--drivers/media/video/vivi.c6
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe-spi.c4
-rw-r--r--drivers/misc/mei/interrupt.c2
-rw-r--r--drivers/misc/mei/main.c9
-rw-r--r--drivers/misc/mei/wd.c2
-rw-r--r--drivers/mmc/card/block.c14
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sd.c6
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h14
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/dw_mmc.c36
-rw-r--r--drivers/mmc/host/mmci.c19
-rw-r--r--drivers/mmc/host/mxs-mmc.c2
-rw-r--r--drivers/mmc/host/omap.c18
-rw-r--r--drivers/mmc/host/omap_hsmmc.c15
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mtd/mtdoops.c22
-rw-r--r--drivers/mtd/ubi/debug.c12
-rw-r--r--drivers/mtd/ubi/wl.c17
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c13
-rw-r--r--drivers/net/bonding/bond_3ad.h4
-rw-r--r--drivers/net/bonding/bond_alb.c26
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c73
-rw-r--r--drivers/net/bonding/bond_procfs.c15
-rw-r--r--drivers/net/bonding/bond_sysfs.c8
-rw-r--r--drivers/net/bonding/bonding.h4
-rw-r--r--drivers/net/caif/caif_hsi.c551
-rw-r--r--drivers/net/can/bfin_can.c2
-rw-r--r--drivers/net/can/c_can/Kconfig20
-rw-r--r--drivers/net/can/c_can/Makefile1
-rw-r--r--drivers/net/can/c_can/c_can.c138
-rw-r--r--drivers/net/can/c_can/c_can.h164
-rw-r--r--drivers/net/can/c_can/c_can_pci.c221
-rw-r--r--drivers/net/can/c_can/c_can_platform.c76
-rw-r--r--drivers/net/can/cc770/cc770.c2
-rw-r--r--drivers/net/can/cc770/cc770_platform.c2
-rw-r--r--drivers/net/can/dev.c37
-rw-r--r--drivers/net/can/flexcan.c107
-rw-r--r--drivers/net/can/mcp251x.c3
-rw-r--r--drivers/net/can/vcan.c27
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/dummy.c19
-rw-r--r--drivers/net/ethernet/3com/3c501.c2
-rw-r--r--drivers/net/ethernet/8390/Kconfig14
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c480
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c8
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c5
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c78
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c105
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c43
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c56
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c10
-rw-r--r--drivers/net/ethernet/broadcom/b44.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c99
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h181
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c269
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h63
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c584
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h184
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1284
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c279
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h168
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h128
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h42
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c68
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c63
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c5
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c97
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h34
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h63
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h15
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_status.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c393
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h43
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c48
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h81
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h42
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h107
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h51
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h33
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c17
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h66
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c12
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c13
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c175
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h44
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c512
-rw-r--r--drivers/net/ethernet/ethoc.c4
-rw-r--r--drivers/net/ethernet/freescale/fec.c32
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c29
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c508
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c420
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c6
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c8
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/intel/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c92
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c43
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c8
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c157
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c155
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c160
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c254
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c200
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h39
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c19
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c28
-rw-r--r--drivers/net/ethernet/marvell/sky2.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c382
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c334
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c524
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c111
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c282
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c35
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c10
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c15
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h3
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c5
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c26
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c42
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c21
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h13
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c315
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c100
-rw-r--r--drivers/net/ethernet/rdc/r6040.c16
-rw-r--r--drivers/net/ethernet/realtek/r8169.c991
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c383
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h77
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx.c1
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c17
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h71
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c234
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c8
-rw-r--r--drivers/net/ethernet/sun/niu.c12
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c3
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c177
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c78
-rw-r--r--drivers/net/ethernet/tile/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/Makefile4
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1898
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/fddi/defxx.c4
-rw-r--r--drivers/net/fddi/skfp/pmf.c8
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c14
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c2
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c8
-rw-r--r--drivers/net/phy/bcm63xx.c31
-rw-r--r--drivers/net/phy/bcm87xx.c231
-rw-r--r--drivers/net/phy/broadcom.c119
-rw-r--r--drivers/net/phy/cicada.c35
-rw-r--r--drivers/net/phy/davicom.c41
-rw-r--r--drivers/net/phy/dp83640.c23
-rw-r--r--drivers/net/phy/fixed.c4
-rw-r--r--drivers/net/phy/icplus.c38
-rw-r--r--drivers/net/phy/lxt.c47
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio-mux.c10
-rw-r--r--drivers/net/phy/mdio_bus.c16
-rw-r--r--drivers/net/phy/micrel.c70
-rw-r--r--drivers/net/phy/national.c8
-rw-r--r--drivers/net/phy/phy.c316
-rw-r--r--drivers/net/phy/phy_device.c139
-rw-r--r--drivers/net/phy/realtek.c6
-rw-r--r--drivers/net/phy/smsc.c64
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/phy/ste10Xp.c21
-rw-r--r--drivers/net/phy/vitesse.c52
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/team/Kconfig13
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c594
-rw-r--r--drivers/net/team/team_mode_activebackup.c14
-rw-r--r--drivers/net/team/team_mode_broadcast.c88
-rw-r--r--drivers/net/team/team_mode_loadbalance.c543
-rw-r--r--drivers/net/team/team_mode_roundrobin.c10
-rw-r--r--drivers/net/usb/asix.c28
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c417
-rw-r--r--drivers/net/usb/sierra_net.c14
-rw-r--r--drivers/net/usb/smsc95xx.c32
-rw-r--r--drivers/net/usb/usbnet.c128
-rw-r--r--drivers/net/virtio_net.c21
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/fw.c2
-rw-r--r--drivers/net/wireless/adm8211.c3
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c4
-rw-r--r--drivers/net/wireless/atmel.c4
-rw-r--r--drivers/net/wireless/b43/main.c2
-rw-r--r--drivers/net/wireless/b43legacy/dma.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c2
-rw-r--r--drivers/net/wireless/libertas/debugfs.c4
-rw-r--r--drivers/net/wireless/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/mwifiex/11n.c14
-rw-r--r--drivers/net/wireless/mwifiex/11n.h3
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c18
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/mwifiex/join.c20
-rw-r--r--drivers/net/wireless/mwifiex/scan.c24
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c16
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c24
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c2
-rw-r--r--drivers/net/wireless/p54/eeprom.c2
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c2
-rw-r--r--drivers/net/wireless/rtlwifi/core.c14
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c14
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c46
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h2
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/of/of_mdio.c16
-rw-r--r--drivers/oprofile/oprofile_perf.c2
-rw-r--r--drivers/pci/pci.c5
-rw-r--r--drivers/pci/quirks.c26
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx.c34
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c13
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c25
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.h2
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/regulator/ab8500.c22
-rw-r--r--drivers/regulator/anatop-regulator.c2
-rw-r--r--drivers/regulator/core.c3
-rw-r--r--drivers/regulator/db8500-prcmu.c40
-rw-r--r--drivers/regulator/gpio-regulator.c16
-rw-r--r--drivers/regulator/max8649.c1
-rw-r--r--drivers/regulator/palmas-regulator.c14
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/regulator/tps65023-regulator.c2
-rw-r--r--drivers/regulator/tps6524x-regulator.c2
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c10
-rw-r--r--drivers/rtc/rtc-cmos.c9
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c44
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h58
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c35
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c40
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c11
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c5
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c12
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c152
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_netlink.c7
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c9
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--drivers/staging/comedi/drivers.c5
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c18
-rw-r--r--drivers/staging/iio/Documentation/device.txt2
-rw-r--r--drivers/staging/iio/adc/Kconfig1
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c3
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c6
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c10
-rw-r--r--drivers/staging/ramster/zcache-main.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/zcache/zcache-main.c10
-rw-r--r--drivers/target/sbp/sbp_target.c8
-rw-r--r--drivers/target/target_core_alua.c5
-rw-r--r--drivers/target/target_core_file.c70
-rw-r--r--drivers/target/target_core_file.h1
-rw-r--r--drivers/target/target_core_transport.c8
-rw-r--r--drivers/tty/hvc/hvc_xen.c31
-rw-r--r--drivers/tty/serial/8250/8250.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c45
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c38
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/class/cdc-acm.c8
-rw-r--r--drivers/usb/class/cdc-wdm.c9
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/message.c3
-rw-r--r--drivers/usb/dwc3/gadget.c3
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c6
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.h4
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h4
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c1
-rw-r--r--drivers/usb/gadget/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-omap.c168
-rw-r--r--drivers/usb/host/ehci-pci.c8
-rw-r--r--drivers/usb/host/ehci-sh.c3
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c4
-rw-r--r--drivers/usb/host/ohci-hub.c2
-rw-r--r--drivers/usb/host/xhci-mem.c74
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/musb/davinci.c1
-rw-r--r--drivers/usb/musb/davinci.h4
-rw-r--r--drivers/usb/musb/musb_gadget.c1
-rw-r--r--drivers/usb/musb/musb_host.c14
-rw-r--r--drivers/usb/otg/twl6030-usb.c15
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/serial/cp210x.c12
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/generic.c10
-rw-r--r--drivers/usb/serial/mct_u232.c13
-rw-r--r--drivers/usb/serial/mos7840.c2
-rw-r--r--drivers/usb/serial/option.c104
-rw-r--r--drivers/usb/serial/qcserial.c6
-rw-r--r--drivers/usb/serial/sierra.c4
-rw-r--r--drivers/usb/serial/usb-serial.c12
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/vhost/vhost.c3
-rw-r--r--drivers/video/backlight/Kconfig2
-rw-r--r--drivers/video/backlight/ili9320.c2
-rw-r--r--drivers/video/bfin_adv7393fb.c6
-rw-r--r--drivers/video/broadsheetfb.c2
-rw-r--r--drivers/video/console/Kconfig14
-rw-r--r--drivers/video/mbx/mbxfb.c2
-rw-r--r--drivers/video/omap2/displays/panel-taal.c2
-rw-r--r--drivers/video/omap2/dss/core.c3
-rw-r--r--drivers/video/omap2/dss/dsi.c2
-rw-r--r--drivers/video/omap2/dss/dss.c2
-rw-r--r--drivers/video/s3c-fb.c12
-rw-r--r--drivers/video/savage/savagefb_driver.c10
-rw-r--r--drivers/watchdog/hpwdt.c4
-rw-r--r--drivers/watchdog/iTCO_wdt.c1
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/watchdog/watchdog_dev.c2
-rw-r--r--drivers/xen/events.c9
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/tmem.c8
710 files changed, 19492 insertions, 8591 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 47768ff..8099895 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -208,7 +208,7 @@ config ACPI_IPMI
config ACPI_HOTPLUG_CPU
bool
- depends on ACPI_PROCESSOR && HOTPLUG_CPU
+ depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
select ACPI_CONTAINER
default y
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index a43fa1a..1502c502 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -36,6 +36,7 @@
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
static unsigned long power_saving_mwait_eax;
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return;
- mutex_lock(&isolated_cpus_lock);
+ mutex_lock(&round_robin_lock);
cpumask_clear(tmp);
for_each_cpu(cpu, pad_busy_cpus)
cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
if (cpumask_empty(tmp))
cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
if (cpumask_empty(tmp)) {
- mutex_unlock(&isolated_cpus_lock);
+ mutex_unlock(&round_robin_lock);
return;
}
for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
tsk_in_cpu[tsk_index] = preferred_cpu;
cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
cpu_weight[preferred_cpu]++;
- mutex_unlock(&isolated_cpus_lock);
+ mutex_unlock(&round_robin_lock);
set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 5577762..6686b1e 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- return acpi_os_map_generic_address(&entry->register_region);
+ return apei_map_generic_address(&entry->register_region);
return 0;
}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- acpi_os_unmap_generic_address(&entry->register_region);
+ apei_unmap_generic_address(&entry->register_region);
return 0;
}
@@ -606,6 +606,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
return 0;
}
+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+ int rc;
+ u32 access_bit_width;
+ u64 address;
+
+ rc = apei_check_gar(reg, &address, &access_bit_width);
+ if (rc)
+ return rc;
+ return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cca240a..f220d64 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -7,6 +7,8 @@
#define APEI_INTERNAL_H
#include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
struct apei_exec_context;
@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
/* IP has been set in instruction function */
#define APEI_EXEC_SET_IP 1
+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+ acpi_os_unmap_generic_address(reg);
+}
+
int apei_read(u64 *val, struct acpi_generic_address *reg);
int apei_write(u64 val, struct acpi_generic_address *reg);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 9b3cac0..1599566 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
- rc = acpi_os_map_generic_address(&generic->error_status_address);
+ rc = apei_map_generic_address(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap:
- acpi_os_unmap_generic_address(&generic->error_status_address);
+ apei_unmap_generic_address(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
- acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+ apei_unmap_generic_address(&ghes->generic->error_status_address);
}
enum {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 86933ca..7dd3f9f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
static void acpi_battery_refresh(struct acpi_battery *battery)
{
+ int power_unit;
+
if (!battery->bat.dev)
return;
+ power_unit = battery->power_unit;
+
acpi_battery_get_info(battery);
- /* The battery may have changed its reporting units. */
+
+ if (power_unit == battery->power_unit)
+ return;
+
+ /* The battery has changed its reporting units. */
sysfs_remove_battery(battery);
sysfs_add_battery(battery);
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3188da3..adceafd 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
Power Management
-------------------------------------------------------------------------- */
+static const char *state_string(int state)
+{
+ switch (state) {
+ case ACPI_STATE_D0:
+ return "D0";
+ case ACPI_STATE_D1:
+ return "D1";
+ case ACPI_STATE_D2:
+ return "D2";
+ case ACPI_STATE_D3_HOT:
+ return "D3hot";
+ case ACPI_STATE_D3_COLD:
+ return "D3";
+ default:
+ return "(unknown)";
+ }
+}
+
static int __acpi_bus_get_power(struct acpi_device *device, int *state)
{
- int result = 0;
- acpi_status status = 0;
- unsigned long long psc = 0;
+ int result = ACPI_STATE_UNKNOWN;
if (!device || !state)
return -EINVAL;
- *state = ACPI_STATE_UNKNOWN;
-
- if (device->flags.power_manageable) {
- /*
- * Get the device's power state either directly (via _PSC) or
- * indirectly (via power resources).
- */
- if (device->power.flags.power_resources) {
- result = acpi_power_get_inferred_state(device, state);
- if (result)
- return result;
- } else if (device->power.flags.explicit_get) {
- status = acpi_evaluate_integer(device->handle, "_PSC",
- NULL, &psc);
- if (ACPI_FAILURE(status))
- return -ENODEV;
- *state = (int)psc;
- }
- } else {
+ if (!device->flags.power_manageable) {
/* TBD: Non-recursive algorithm for walking up hierarchy. */
*state = device->parent ?
device->parent->power.state : ACPI_STATE_D0;
+ goto out;
+ }
+
+ /*
+ * Get the device's power state either directly (via _PSC) or
+ * indirectly (via power resources).
+ */
+ if (device->power.flags.explicit_get) {
+ unsigned long long psc;
+ acpi_status status = acpi_evaluate_integer(device->handle,
+ "_PSC", NULL, &psc);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ result = psc;
+ }
+ /* The test below covers ACPI_STATE_UNKNOWN too. */
+ if (result <= ACPI_STATE_D2) {
+ ; /* Do nothing. */
+ } else if (device->power.flags.power_resources) {
+ int error = acpi_power_get_inferred_state(device, &result);
+ if (error)
+ return error;
+ } else if (result == ACPI_STATE_D3_HOT) {
+ result = ACPI_STATE_D3;
}
+ *state = result;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
- device->pnp.bus_id, *state));
+ out:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+ device->pnp.bus_id, state_string(*state)));
return 0;
}
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
/* Make sure this is a valid target state */
if (state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
- state));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+ state_string(state)));
return 0;
}
if (!device->power.states[state].flags.valid) {
- printk(KERN_WARNING PREFIX "Device does not support D%d\n", state);
+ printk(KERN_WARNING PREFIX "Device does not support %s\n",
+ state_string(state));
return -ENODEV;
}
if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
end:
if (result)
printk(KERN_WARNING PREFIX
- "Device [%s] failed to transition to D%d\n",
- device->pnp.bus_id, state);
+ "Device [%s] failed to transition to %s\n",
+ device->pnp.bus_id, state_string(state));
else {
device->power.state = state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] transitioned to D%d\n",
- device->pnp.bus_id, state));
+ "Device [%s] transitioned to %s\n",
+ device->pnp.bus_id, state_string(state)));
}
return result;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0500f71..dd6d6a3 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
* We know a device's inferred power state when all the resources
* required for a given D-state are 'on'.
*/
- for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
+ for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
list = &device->power.states[i].resources;
if (list->count < 1)
continue;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f3decb3..47a8caa 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -224,6 +224,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
/*
* Suspend / resume control
*/
+static int acpi_idle_suspend;
static u32 saved_bm_rld;
static void acpi_idle_bm_rld_save(void)
@@ -242,13 +243,21 @@ static void acpi_idle_bm_rld_restore(void)
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
+ if (acpi_idle_suspend == 1)
+ return 0;
+
acpi_idle_bm_rld_save();
+ acpi_idle_suspend = 1;
return 0;
}
int acpi_processor_resume(struct acpi_device * device)
{
+ if (acpi_idle_suspend == 0)
+ return 0;
+
acpi_idle_bm_rld_restore();
+ acpi_idle_suspend = 0;
return 0;
}
@@ -754,6 +763,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
local_irq_disable();
+ if (acpi_idle_suspend) {
+ local_irq_enable();
+ cpu_relax();
+ return -EBUSY;
+ }
+
lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
@@ -823,6 +838,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
local_irq_disable();
+ if (acpi_idle_suspend) {
+ local_irq_enable();
+ cpu_relax();
+ return -EBUSY;
+ }
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -907,14 +928,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
drv, drv->safe_state_index);
} else {
local_irq_disable();
- acpi_safe_halt();
+ if (!acpi_idle_suspend)
+ acpi_safe_halt();
local_irq_enable();
- return -EINVAL;
+ return -EBUSY;
}
}
local_irq_disable();
+ if (acpi_idle_suspend) {
+ local_irq_enable();
+ cpu_relax();
+ return -EBUSY;
+ }
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0af48a8..a093dc1 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
struct acpi_buffer state = { 0, NULL };
union acpi_object *pss = NULL;
int i;
+ int last_invalid = -1;
status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
@@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
((u32)(px->core_frequency * 1000) !=
(px->core_frequency * 1000))) {
printk(KERN_ERR FW_BUG PREFIX
- "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
- px->core_frequency);
- result = -EFAULT;
- kfree(pr->performance->states);
- goto end;
+ "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
+ pr->id, px->core_frequency);
+ if (last_invalid == -1)
+ last_invalid = i;
+ } else {
+ if (last_invalid != -1) {
+ /*
+ * Copy this valid entry over last_invalid entry
+ */
+ memcpy(&(pr->performance->states[last_invalid]),
+ px, sizeof(struct acpi_processor_px));
+ ++last_invalid;
+ }
}
}
+ if (last_invalid == 0) {
+ printk(KERN_ERR FW_BUG PREFIX
+ "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
+ result = -EFAULT;
+ kfree(pr->performance->states);
+ pr->performance->states = NULL;
+ }
+
+ if (last_invalid > 0)
+ pr->performance->state_count = last_invalid;
+
end:
kfree(buffer.pointer);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 85cbfdc..c8a1f3b 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void)
ACPI_BUS_TYPE_POWER_BUTTON,
ACPI_STA_DEFAULT,
&ops);
+ device_init_wakeup(&device->dev, true);
}
if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 74ee4ab..88561029 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
static u8 sleep_states[ACPI_S_STATE_COUNT];
+static bool pwr_btn_event_pending;
static void acpi_sleep_tts_switch(u32 acpi_state)
{
@@ -184,6 +185,14 @@ static int acpi_pm_prepare(void)
return error;
}
+static int find_powerf_dev(struct device *dev, void *data)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ const char *hid = acpi_device_hid(device);
+
+ return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
+}
+
/**
* acpi_pm_finish - Instruct the platform to leave a sleep state.
*
@@ -192,6 +201,7 @@ static int acpi_pm_prepare(void)
*/
static void acpi_pm_finish(void)
{
+ struct device *pwr_btn_dev;
u32 acpi_state = acpi_target_sleep_state;
acpi_ec_unblock_transactions();
@@ -209,6 +219,23 @@ static void acpi_pm_finish(void)
acpi_set_firmware_waking_vector((acpi_physical_address) 0);
acpi_target_sleep_state = ACPI_STATE_S0;
+
+ /* If we were woken with the fixed power button, provide a small
+ * hint to userspace in the form of a wakeup event on the fixed power
+ * button device (if it can be found).
+ *
+ * We delay the event generation til now, as the PM layer requires
+ * timekeeping to be running before we generate events. */
+ if (!pwr_btn_event_pending)
+ return;
+
+ pwr_btn_event_pending = false;
+ pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
+ find_powerf_dev);
+ if (pwr_btn_dev) {
+ pm_wakeup_event(pwr_btn_dev, 0);
+ put_device(pwr_btn_dev);
+ }
}
/**
@@ -298,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/* ACPI 3.0 specs (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
* POWER_BUTTON event should not reach userspace ]
+ *
+ * However, we do generate a small hint for userspace in the form of
+ * a wakeup event. We flag this condition for now and generate the
+ * event later, as we're currently too early in resume to be able to
+ * generate wakeup events.
*/
- if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
- acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
+ acpi_event_status pwr_btn_status;
+
+ acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
+
+ if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
+ acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+ /* Flag for later */
+ pwr_btn_event_pending = true;
+ }
+ }
/*
* Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -730,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
* can wake the system. _S0W may be valid, too.
*/
if (acpi_target_sleep_state == ACPI_STATE_S0 ||
- (device_may_wakeup(dev) &&
- adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+ (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
+ adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
acpi_status status;
acpi_method[3] = 'W';
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 9f66181..240a244 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
int result = 0;
- if (!strncmp(val, "enable", strlen("enable") - 1)) {
+ if (!strncmp(val, "enable", strlen("enable"))) {
result = acpi_debug_trace(trace_method_name, trace_debug_level,
trace_debug_layer, 0);
if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
goto exit;
}
- if (!strncmp(val, "disable", strlen("disable") - 1)) {
+ if (!strncmp(val, "disable", strlen("disable"))) {
int name = 0;
result = acpi_debug_trace((char *)&name, trace_debug_level,
trace_debug_layer, 0);
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 9577b6f..1e0a9e1 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
+ if (!video->cap._DOS)
+ return 0;
if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
return -EINVAL;
@@ -1687,10 +1689,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
set_bit(KEY_DISPLAY_OFF, input->keybit);
- error = input_register_device(input);
- if (error)
- goto err_stop_video;
-
printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
@@ -1701,12 +1699,16 @@ static int acpi_video_bus_add(struct acpi_device *device)
video->pm_nb.priority = 0;
error = register_pm_notifier(&video->pm_nb);
if (error)
- goto err_unregister_input_dev;
+ goto err_stop_video;
+
+ error = input_register_device(input);
+ if (error)
+ goto err_unregister_pm_notifier;
return 0;
- err_unregister_input_dev:
- input_unregister_device(input);
+ err_unregister_pm_notifier:
+ unregister_pm_notifier(&video->pm_nb);
err_stop_video:
acpi_video_bus_stop_devices(video);
err_free_input_dev:
@@ -1743,9 +1745,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
return 0;
}
+static int __init is_i740(struct pci_dev *dev)
+{
+ if (dev->device == 0x00D1)
+ return 1;
+ if (dev->device == 0x7000)
+ return 1;
+ return 0;
+}
+
static int __init intel_opregion_present(void)
{
-#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE)
+ int opregion = 0;
struct pci_dev *dev = NULL;
u32 address;
@@ -1754,13 +1765,15 @@ static int __init intel_opregion_present(void)
continue;
if (dev->vendor != PCI_VENDOR_ID_INTEL)
continue;
+ /* We don't want to poke around undefined i740 registers */
+ if (is_i740(dev))
+ continue;
pci_read_config_dword(dev, 0xfc, &address);
if (!address)
continue;
- return 1;
+ opregion = 1;
}
-#endif
- return 0;
+ return opregion;
}
int acpi_video_register(void)
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 3239517..ac6a5be 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
* Arasan Compact Flash host controller source file
*
* Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -959,7 +959,7 @@ static struct platform_driver arasan_cf_driver = {
module_platform_driver(arasan_cf_driver);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1b1cbb5..dcb8a6e 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -100,7 +100,7 @@ static void driver_deferred_probe_add(struct device *dev)
mutex_lock(&deferred_probe_mutex);
if (list_empty(&dev->p->deferred_probe)) {
dev_dbg(dev, "Added to deferred list\n");
- list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+ list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
}
mutex_unlock(&deferred_probe_mutex);
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e0fb5b0..9cb845e 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1031,7 +1031,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_children(dev, async);
if (async_error)
- return 0;
+ goto Complete;
pm_runtime_get_noresume(dev);
if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1040,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (pm_wakeup_pending()) {
pm_runtime_put_sync(dev);
async_error = -EBUSY;
- return 0;
+ goto Complete;
}
device_lock(dev);
@@ -1097,6 +1097,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
device_unlock(dev);
+
+ Complete:
complete_all(&dev->power.completion);
if (error) {
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0bcda48..c89aa01 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev,
map->lock = regmap_lock_mutex;
map->unlock = regmap_unlock_mutex;
}
- map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size += map->format.pad_bytes;
+ map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
+ config->val_bits + config->pad_bits, 8);
map->reg_shift = config->pad_bits % 8;
if (config->reg_stride)
map->reg_stride = config->reg_stride;
@@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev,
ret = regcache_init(map, config);
if (ret < 0)
- goto err_free_workbuf;
+ goto err_debugfs;
/* Add a devres resource for dev_get_regmap() */
m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
@@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev,
err_cache:
regcache_exit(map);
-err_free_workbuf:
+err_debugfs:
+ regmap_debugfs_exit(map);
kfree(map->work_buf);
err_map:
kfree(map);
@@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
return ret;
}
+EXPORT_SYMBOL_GPL(regmap_reinit_cache);
/**
* regmap_exit(): Free a previously allocated register map
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b5c5ff5..fcb956b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
first_word = 0;
spin_lock_irq(&b->bm_lock);
}
-
/* last page (respectively only page, for first page == last page) */
last_word = MLPP(el >> LN2_BPL);
- bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+ /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+ * ==> e = 32767, el = 32768, last_page = 2,
+ * and now last_word = 0.
+ * We do not want to touch last_page in this case,
+ * as we did not allocate it, it is not present in bitmap->bm_pages.
+ */
+ if (last_word)
+ bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
/* possibly trailing bits.
* example: (e & 63) == 63, el will be e+1.
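The guard added above matters exactly when the end of the range falls on a page boundary. A small standalone sketch of that boundary arithmetic, assuming 64-bit longs and 4 KiB pages (the macro names mirror the driver's, the values are made up):

#include <stdio.h>

#define LN2_BPL		6			/* log2(bits per long), 64-bit */
#define LONGS_PER_PAGE	((4096 * 8) >> LN2_BPL)	/* 512 */
#define MLPP(x)		((x) % LONGS_PER_PAGE)

int main(void)
{
	unsigned long el = 32768;	/* end bit: exactly one page worth of bits */
	unsigned long last_word = MLPP(el >> LN2_BPL);

	/* (32768 >> 6) % 512 == 0: no full words left for the "last" page,
	 * which was never allocated - hence the new if (last_word) check */
	printf("last_word = %lu\n", last_word);
	return 0;
}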
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9c5c849..8e93a6a 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+ if (req->rq_state & RQ_LOCAL_ABORTED) {
+ _req_may_be_done(req, m);
+ break;
+ }
__drbd_chk_io_error(mdev, false);
goto_queue_for_net_read:
+ D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
/* no point in retrying if there is no good remote data,
* or we have no connection. */
if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
}
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+ int congested = 0;
+
+ /* If I don't even have good local storage, we cannot reasonably try
+ * to pull ahead of the peer. We also need the local reference to make
+ * sure mdev->act_log is there.
+ * Note: caller has to make sure that net_conf is there.
+ */
+ if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ return;
+
+ if (mdev->net_conf->cong_fill &&
+ atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+ dev_info(DEV, "Congestion-fill threshold reached\n");
+ congested = 1;
+ }
+
+ if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+ dev_info(DEV, "Congestion-extents threshold reached\n");
+ congested = 1;
+ }
+
+ if (congested) {
+ queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+ if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+ _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+ else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+ _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+ }
+ put_ldev(mdev);
+}
+
static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
_req_mod(req, queue_for_send_oos);
if (remote &&
- mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
- int congested = 0;
-
- if (mdev->net_conf->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
- dev_info(DEV, "Congestion-fill threshold reached\n");
- congested = 1;
- }
-
- if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
- dev_info(DEV, "Congestion-extents threshold reached\n");
- congested = 1;
- }
-
- if (congested) {
- queue_barrier(mdev); /* last barrier, after mirrored writes */
-
- if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
- _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
- else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
- _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
- }
- }
+ mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+ maybe_pull_ahead(mdev);
spin_unlock_irq(&mdev->req_lock);
kfree(b); /* if someone else has beaten us to it... */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index cce7df3..553f43a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)
if (drive == current_reqD)
drive = current_drive;
+ __cancel_delayed_work(&fd_timeout);
if (drive < 0 || drive >= N_DRIVE) {
delay = 20UL * HZ;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 264bc77..a8fddeb 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
+#include <linux/debugfs.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
* allocated in mtip_init().
*/
static int mtip_major;
+static struct dentry *dfs_parent;
static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
}
/*
- * Sysfs register/status dump.
+ * Sysfs status dump.
*
* @dev Pointer to the device structure, passed by the kernel.
* @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
* return value
* The size, in bytes, of the data copied into buf.
*/
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- u32 group_allocated;
struct driver_data *dd = dev_to_disk(dev)->private_data;
int size = 0;
+
+ if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "thermal_shutdown\n");
+ else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "write_protect\n");
+ else
+ size += sprintf(buf, "%s", "online\n");
+
+ return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+ size_t len, loff_t *offset)
+{
+ struct driver_data *dd = (struct driver_data *)f->private_data;
+ char buf[MTIP_DFS_MAX_BUF_SIZE];
+ u32 group_allocated;
+ int size = *offset;
int n;
- size += sprintf(&buf[size], "Hardware\n--------\n");
- size += sprintf(&buf[size], "S ACTive : [ 0x");
+ if (!len || size)
+ return 0;
+
+ if (size < 0)
+ return -EINVAL;
+
+ size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->s_active[n]));
size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "Command Issue : [ 0x");
+ size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->cmd_issue[n]));
size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "Completed : [ 0x");
+ size += sprintf(&buf[size], "H/ Completed : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->completed[n]));
size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+ size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
readl(dd->port->mmio + PORT_IRQ_STAT));
- size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+ size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
readl(dd->mmio + HOST_IRQ_STAT));
size += sprintf(&buf[size], "\n");
- size += sprintf(&buf[size], "Local\n-----\n");
- size += sprintf(&buf[size], "Allocated : [ 0x");
+ size += sprintf(&buf[size], "L/ Allocated : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
}
size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "Commands in Q: [ 0x");
+ size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
}
size += sprintf(&buf[size], "]\n");
- return size;
+ *offset = size <= len ? size : len;
+ size = copy_to_user(ubuf, buf, *offset);
+ if (size)
+ return -EFAULT;
+
+ return *offset;
}
-static ssize_t mtip_hw_show_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+ size_t len, loff_t *offset)
{
- struct driver_data *dd = dev_to_disk(dev)->private_data;
- int size = 0;
+ struct driver_data *dd = (struct driver_data *)f->private_data;
+ char buf[MTIP_DFS_MAX_BUF_SIZE];
+ int size = *offset;
- if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
- size += sprintf(buf, "%s", "thermal_shutdown\n");
- else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
- size += sprintf(buf, "%s", "write_protect\n");
- else
- size += sprintf(buf, "%s", "online\n");
-
- return size;
-}
+ if (!len || size)
+ return 0;
-static ssize_t mtip_hw_show_flags(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct driver_data *dd = dev_to_disk(dev)->private_data;
- int size = 0;
+ if (size < 0)
+ return -EINVAL;
- size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+ size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
dd->port->flags);
- size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n",
+ size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
dd->dd_flag);
- return size;
+ *offset = size <= len ? size : len;
+ size = copy_to_user(ubuf, buf, *offset);
+ if (size)
+ return -EFAULT;
+
+ return *offset;
}
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
+static const struct file_operations mtip_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mtip_hw_read_registers,
+ .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mtip_hw_read_flags,
+ .llseek = no_llseek,
+};
/*
* Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;
- if (sysfs_create_file(kobj, &dev_attr_registers.attr))
- dev_warn(&dd->pdev->dev,
- "Error creating 'registers' sysfs entry\n");
if (sysfs_create_file(kobj, &dev_attr_status.attr))
dev_warn(&dd->pdev->dev,
"Error creating 'status' sysfs entry\n");
- if (sysfs_create_file(kobj, &dev_attr_flags.attr))
- dev_warn(&dd->pdev->dev,
- "Error creating 'flags' sysfs entry\n");
return 0;
}
@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;
- sysfs_remove_file(kobj, &dev_attr_registers.attr);
sysfs_remove_file(kobj, &dev_attr_status.attr);
- sysfs_remove_file(kobj, &dev_attr_flags.attr);
return 0;
}
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+ if (!dfs_parent)
+ return -1;
+
+ dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+ if (IS_ERR_OR_NULL(dd->dfs_node)) {
+ dev_warn(&dd->pdev->dev,
+ "Error creating node %s under debugfs\n",
+ dd->disk->disk_name);
+ dd->dfs_node = NULL;
+ return -1;
+ }
+
+ debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+ &mtip_flags_fops);
+ debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+ &mtip_regs_fops);
+
+ return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+ debugfs_remove_recursive(dd->dfs_node);
+}
+
+
/*
* Perform any init/resume time hardware setup
*
@@ -3730,6 +3784,7 @@ skip_create_disk:
mtip_hw_sysfs_init(dd, kobj);
kobject_put(kobj);
}
+ mtip_hw_debugfs_init(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
return rv;
kthread_run_error:
+ mtip_hw_debugfs_exit(dd);
+
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
kobject_put(kobj);
}
}
+ mtip_hw_debugfs_exit(dd);
/*
* Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
}
mtip_major = error;
+ if (!dfs_parent) {
+ dfs_parent = debugfs_create_dir("rssd", NULL);
+ if (IS_ERR_OR_NULL(dfs_parent)) {
+ printk(KERN_WARNING "Error creating debugfs parent\n");
+ dfs_parent = NULL;
+ }
+ }
+
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
- if (error)
+ if (error) {
+ debugfs_remove(dfs_parent);
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+ }
return error;
}
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
*/
static void __exit mtip_exit(void)
{
+ debugfs_remove_recursive(dfs_parent);
+
/* Release the allocated major block device number. */
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b2c88da..f51fc23 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -26,7 +26,6 @@
#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/genhd.h>
-#include <linux/version.h>
/* Offset of Subsystem Device ID in pci configuration space */
#define PCI_SUBSYSTEM_DEVICEID 0x2E
@@ -111,6 +110,8 @@
#define dbg_printk(format, arg...)
#endif
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
#define __force_bit2int (unsigned int __force)
enum {
@@ -447,6 +448,8 @@ struct driver_data {
unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+ struct dentry *dfs_node;
};
#endif
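The mtip32xx hunks above move the register and flag dumps out of sysfs and into per-disk debugfs nodes under an "rssd" parent directory. A rough, self-contained sketch of that debugfs pattern, using simple_read_from_buffer in place of the driver's manual copy_to_user, with entirely hypothetical names:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stat.h>

/* hypothetical state exposed through debugfs */
static unsigned long example_flags = 0xabcd;
static struct dentry *example_dir;

static ssize_t example_read(struct file *f, char __user *ubuf,
			    size_t len, loff_t *offset)
{
	char buf[64];
	int size;

	if (*offset)		/* single-shot read, like the mtip handlers */
		return 0;

	size = scnprintf(buf, sizeof(buf), "flags : [ %08lX ]\n", example_flags);
	return simple_read_from_buffer(ubuf, len, offset, buf, size);
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= example_read,
	.llseek	= no_llseek,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	debugfs_create_file("flags", S_IRUGO, example_dir, NULL, &example_fops);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");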
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index aa27120..9a72277 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
}
}
+struct mm_plug_cb {
+ struct blk_plug_cb cb;
+ struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+ struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+ spin_lock_irq(&mmcb->card->lock);
+ activate(mmcb->card);
+ spin_unlock_irq(&mmcb->card->lock);
+ kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+ struct blk_plug *plug = current->plug;
+ struct mm_plug_cb *mmcb;
+
+ if (!plug)
+ return 0;
+
+ list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+ if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+ return 1;
+ }
+ /* Not currently on the callback list */
+ mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+ if (!mmcb)
+ return 0;
+
+ mmcb->card = card;
+ mmcb->cb.callback = mm_unplug;
+ list_add(&mmcb->cb.list, &plug->cb_list);
+ return 1;
+}
+
static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
+ if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+ activate(card);
spin_unlock_irq(&card->lock);
return;
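mm_check_plugged() above registers a one-off callback on the current task's block plug, so bios queued while the plug is held only kick the card once when the plug is flushed. A rough sketch of the submit-side flow that benefits, assuming the 3.5-era block API (the function and its parameters are hypothetical):

#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/fs.h>

/* Illustrative only: queue a batch of bios under one plug so that
 * mm_unplug() runs once, from blk_finish_plug(), instead of the card
 * being activated per bio. */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);
}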
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 773cf27..9ad3b5e 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 60eed4b..e4fb337 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
return free;
}
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
unsigned long id)
{
+ if (info->shadow[id].req.u.rw.id != id)
+ return -EINVAL;
+ if (info->shadow[id].request == NULL)
+ return -EINVAL;
info->shadow[id].req.u.rw.id = info->shadow_free;
info->shadow[id].request = NULL;
info->shadow_free = id;
+ return 0;
}
+static const char *op_name(int op)
+{
+ static const char *const names[] = {
+ [BLKIF_OP_READ] = "read",
+ [BLKIF_OP_WRITE] = "write",
+ [BLKIF_OP_WRITE_BARRIER] = "barrier",
+ [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+ [BLKIF_OP_DISCARD] = "discard" };
+
+ if (op < 0 || op >= ARRAY_SIZE(names))
+ return "unknown";
+
+ if (!names[op])
+ return "reserved";
+
+ return names[op];
+}
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
+ /*
+ * The backend has messed up and given us an id that we would
+ * never have given to it (we stamp it up to BLK_RING_SIZE -
+ * look in get_id_from_freelist.
+ */
+ if (id >= BLK_RING_SIZE) {
+ WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+ info->gd->disk_name, op_name(bret->operation), id);
+ /* We can't safely get the 'struct request' as
+ * the id is busted. */
+ continue;
+ }
req = info->shadow[id].request;
if (bret->operation != BLKIF_OP_DISCARD)
blkif_completion(&info->shadow[id]);
- add_id_to_freelist(info, id);
+ if (add_id_to_freelist(info, id)) {
+ WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+ info->gd->disk_name, op_name(bret->operation), id);
+ continue;
+ }
error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
- printk(KERN_WARNING "blkfront: %s: discard op failed\n",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
- printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
- info->flush_op == BLKIF_OP_WRITE_BARRIER ?
- "barrier" : "flush disk cache",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
- printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
- info->flush_op == BLKIF_OP_WRITE_BARRIER ?
- "barrier" : "flush disk cache",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(error)) {
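op_name() above is a bounds- and hole-checked lookup into a sparsely initialised string table, so a corrupted operation code from the backend can never index past the array or hit a NULL slot. The same pattern in isolation, as a small userspace program with made-up values:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* same lookup shape as blkfront's op_name(): bounds check first, then a
 * NULL check for holes left by the designated initializers */
static const char *const names[] = {
	[0] = "read",
	[1] = "write",
	[4] = "discard",
};

static const char *op_name(int op)
{
	if (op < 0 || op >= (int)ARRAY_SIZE(names))
		return "unknown";
	if (!names[op])
		return "reserved";
	return names[op];
}

int main(void)
{
	/* prints: write reserved unknown */
	printf("%s %s %s\n", op_name(1), op_name(2), op_name(7));
	return 0;
}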
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 764f70c..0a41852 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -898,6 +898,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_B43_HB),
ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c009175..8e2d914 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -212,6 +212,7 @@
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index f518b99..731c904 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -34,8 +34,15 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
u32 *data = buf;
/* data ready? */
- if (readl(trng->base + TRNG_ODATA) & 1) {
+ if (readl(trng->base + TRNG_ISR) & 1) {
*data = readl(trng->base + TRNG_ODATA);
+ /*
+ * Ensure the "data ready" flag is only set again AFTER the next data
+ * word is ready, in case it got set between checking ISR and reading
+ * ODATA, so we don't risk re-reading the same word.
+ */
+ readl(trng->base + TRNG_ISR);
return 4;
} else
return 0;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 687b00d..9a1eb0c 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -850,18 +850,21 @@ static void clk_change_rate(struct clk *clk)
{
struct clk *child;
unsigned long old_rate;
+ unsigned long best_parent_rate = 0;
struct hlist_node *tmp;
old_rate = clk->rate;
+ if (clk->parent)
+ best_parent_rate = clk->parent->rate;
+
if (clk->ops->set_rate)
- clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);
+ clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
if (clk->ops->recalc_rate)
- clk->rate = clk->ops->recalc_rate(clk->hw,
- clk->parent->rate);
+ clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
else
- clk->rate = clk->parent->rate;
+ clk->rate = best_parent_rate;
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
@@ -999,7 +1002,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
if (!clk->parents)
clk->parents =
- kmalloc((sizeof(struct clk*) * clk->num_parents),
+ kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
if (!clk->parents)
@@ -1064,21 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
old_parent = clk->parent;
- /* find index of new parent clock using cached parent ptrs */
- for (i = 0; i < clk->num_parents; i++)
- if (clk->parents[i] == parent)
- break;
+ if (!clk->parents)
+ clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+ GFP_KERNEL);
/*
- * find index of new parent clock using string name comparison
- * also try to cache the parent to avoid future calls to __clk_lookup
+ * find index of new parent clock using cached parent ptrs,
+ * or if not yet cached, use string name comparison and cache
+ * them now to avoid future calls to __clk_lookup.
*/
- if (i == clk->num_parents)
- for (i = 0; i < clk->num_parents; i++)
- if (!strcmp(clk->parent_names[i], parent->name)) {
+ for (i = 0; i < clk->num_parents; i++) {
+ if (clk->parents && clk->parents[i] == parent)
+ break;
+ else if (!strcmp(clk->parent_names[i], parent->name)) {
+ if (clk->parents)
clk->parents[i] = __clk_lookup(parent->name);
- break;
- }
+ break;
+ }
+ }
if (i == clk->num_parents) {
pr_debug("%s: clock %s is not a possible parent of clock %s\n",
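The clk_change_rate() change above is essentially a NULL-parent guard: a root clock has no parent, so the parent rate has to default to 0 instead of being read through a NULL pointer. A tiny standalone sketch of that case (the struct and names are made up, not the framework's):

#include <stdio.h>

struct fake_clk {
	struct fake_clk *parent;
	unsigned long rate;
};

static unsigned long best_parent_rate(const struct fake_clk *clk)
{
	/* a root clock has no parent; before the fix this path dereferenced NULL */
	return clk->parent ? clk->parent->rate : 0;
}

int main(void)
{
	struct fake_clk root = { .parent = NULL, .rate = 24000000 };
	struct fake_clk child = { .parent = &root, .rate = 0 };

	printf("%lu %lu\n", best_parent_rate(&root), best_parent_rate(&child));
	return 0;
}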
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index f7be225..db2391c 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -71,7 +71,7 @@ static void __init clk_misc_init(void)
__mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
}
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
{ .dev_id = "duart", },
{ .dev_id = "mxs-auart.0", },
{ .dev_id = "mxs-auart.1", },
@@ -80,31 +80,31 @@ static struct clk_lookup uart_lookups[] __initdata = {
{ .dev_id = "80070000.serial", },
};
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
{ .dev_id = "imx23-dma-apbh", },
{ .dev_id = "80004000.dma-apbh", },
};
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
{ .dev_id = "duart", .con_id = "apb_pclk"},
{ .dev_id = "80070000.serial", .con_id = "apb_pclk"},
{ .dev_id = "imx23-dma-apbx", },
{ .dev_id = "80024000.dma-apbx", },
};
-static struct clk_lookup ssp_lookups[] __initdata = {
+static struct clk_lookup ssp_lookups[] = {
{ .dev_id = "imx23-mmc.0", },
{ .dev_id = "imx23-mmc.1", },
{ .dev_id = "80010000.ssp", },
{ .dev_id = "80034000.ssp", },
};
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
{ .dev_id = "imx23-fb", },
{ .dev_id = "80030000.lcdif", },
};
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
{ .dev_id = "imx23-gpmi-nand", },
{ .dev_id = "8000c000.gpmi", },
};
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index 2826a26..7fad6c8 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -120,7 +120,7 @@ static void __init clk_misc_init(void)
writel_relaxed(val, FRAC0);
}
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
{ .dev_id = "duart", },
{ .dev_id = "mxs-auart.0", },
{ .dev_id = "mxs-auart.1", },
@@ -135,71 +135,71 @@ static struct clk_lookup uart_lookups[] __initdata = {
{ .dev_id = "80074000.serial", },
};
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
{ .dev_id = "imx28-dma-apbh", },
{ .dev_id = "80004000.dma-apbh", },
};
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
{ .dev_id = "duart", .con_id = "apb_pclk"},
{ .dev_id = "80074000.serial", .con_id = "apb_pclk"},
{ .dev_id = "imx28-dma-apbx", },
{ .dev_id = "80024000.dma-apbx", },
};
-static struct clk_lookup ssp0_lookups[] __initdata = {
+static struct clk_lookup ssp0_lookups[] = {
{ .dev_id = "imx28-mmc.0", },
{ .dev_id = "80010000.ssp", },
};
-static struct clk_lookup ssp1_lookups[] __initdata = {
+static struct clk_lookup ssp1_lookups[] = {
{ .dev_id = "imx28-mmc.1", },
{ .dev_id = "80012000.ssp", },
};
-static struct clk_lookup ssp2_lookups[] __initdata = {
+static struct clk_lookup ssp2_lookups[] = {
{ .dev_id = "imx28-mmc.2", },
{ .dev_id = "80014000.ssp", },
};
-static struct clk_lookup ssp3_lookups[] __initdata = {
+static struct clk_lookup ssp3_lookups[] = {
{ .dev_id = "imx28-mmc.3", },
{ .dev_id = "80016000.ssp", },
};
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
{ .dev_id = "imx28-fb", },
{ .dev_id = "80030000.lcdif", },
};
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
{ .dev_id = "imx28-gpmi-nand", },
{ .dev_id = "8000c000.gpmi", },
};
-static struct clk_lookup fec_lookups[] __initdata = {
+static struct clk_lookup fec_lookups[] = {
{ .dev_id = "imx28-fec.0", },
{ .dev_id = "imx28-fec.1", },
{ .dev_id = "800f0000.ethernet", },
{ .dev_id = "800f4000.ethernet", },
};
-static struct clk_lookup can0_lookups[] __initdata = {
+static struct clk_lookup can0_lookups[] = {
{ .dev_id = "flexcan.0", },
{ .dev_id = "80032000.can", },
};
-static struct clk_lookup can1_lookups[] __initdata = {
+static struct clk_lookup can1_lookups[] = {
{ .dev_id = "flexcan.1", },
{ .dev_id = "80034000.can", },
};
-static struct clk_lookup saif0_lookups[] __initdata = {
+static struct clk_lookup saif0_lookups[] = {
{ .dev_id = "mxs-saif.0", },
{ .dev_id = "80042000.saif", },
};
-static struct clk_lookup saif1_lookups[] __initdata = {
+static struct clk_lookup saif1_lookups[] = {
{ .dev_id = "mxs-saif.1", },
{ .dev_id = "80046000.saif", },
};
@@ -245,8 +245,8 @@ int __init mx28_clocks_init(void)
clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
- clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2);
- clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3);
+ clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2);
+ clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3);
clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index af34074..6756e7c 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 4dbdb3f..958aa3a 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index b471c97..1afc18c 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index dcd4bdf..5f1b6ba 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 376d4e5..7cd6378 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 3321c46..9317376 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -2,7 +2,7 @@
* Clock framework definitions for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 42b68df..8f05652 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -4,7 +4,7 @@
* SPEAr1310 machine clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index f130919..e3ea721 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -4,7 +4,7 @@
* SPEAr1340 machine clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 440bb3e..01dd6da 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -2,7 +2,7 @@
* SPEAr3xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index f9a20b3..61026ae 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -2,7 +2,7 @@
* SPEAr6xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -298,7 +298,7 @@ void __init spear6xx_clk_init(void)
clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
GMAC_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, NULL, "gmac");
+ clk_register_clkdev(clk, NULL, "e0800000.ethernet");
clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
I2C_CLK_ENB, 0, &_lock);
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8d81a1d..dd3e661 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
+obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644
index 0000000..372051d
--- /dev/null
+++ b/drivers/clocksource/em_sti.c
@@ -0,0 +1,406 @@
+/*
+ * Emma Mobile Timer Support - STI
+ *
+ * Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
+
+struct em_sti_priv {
+ void __iomem *base;
+ struct clk *clk;
+ struct platform_device *pdev;
+ unsigned int active[USER_NR];
+ unsigned long rate;
+ raw_spinlock_t lock;
+ struct clock_event_device ced;
+ struct clocksource cs;
+};
+
+#define STI_CONTROL 0x00
+#define STI_COMPA_H 0x10
+#define STI_COMPA_L 0x14
+#define STI_COMPB_H 0x18
+#define STI_COMPB_L 0x1c
+#define STI_COUNT_H 0x20
+#define STI_COUNT_L 0x24
+#define STI_COUNT_RAW_H 0x28
+#define STI_COUNT_RAW_L 0x2c
+#define STI_SET_H 0x30
+#define STI_SET_L 0x34
+#define STI_INTSTATUS 0x40
+#define STI_INTRAWSTATUS 0x44
+#define STI_INTENSET 0x48
+#define STI_INTENCLR 0x4c
+#define STI_INTFFCLR 0x50
+
+static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
+{
+ return ioread32(p->base + offs);
+}
+
+static inline void em_sti_write(struct em_sti_priv *p, int offs,
+ unsigned long value)
+{
+ iowrite32(value, p->base + offs);
+}
+
+static int em_sti_enable(struct em_sti_priv *p)
+{
+ int ret;
+
+ /* enable clock */
+ ret = clk_enable(p->clk);
+ if (ret) {
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
+ return ret;
+ }
+
+ /* configure channel, periodic mode and maximum timeout */
+ p->rate = clk_get_rate(p->clk);
+
+ /* reset the counter */
+ em_sti_write(p, STI_SET_H, 0x40000000);
+ em_sti_write(p, STI_SET_L, 0x00000000);
+
+ /* mask and clear pending interrupts */
+ em_sti_write(p, STI_INTENCLR, 3);
+ em_sti_write(p, STI_INTFFCLR, 3);
+
+ /* enable updates of counter registers */
+ em_sti_write(p, STI_CONTROL, 1);
+
+ return 0;
+}
+
+static void em_sti_disable(struct em_sti_priv *p)
+{
+ /* mask interrupts */
+ em_sti_write(p, STI_INTENCLR, 3);
+
+ /* stop clock */
+ clk_disable(p->clk);
+}
+
+static cycle_t em_sti_count(struct em_sti_priv *p)
+{
+ cycle_t ticks;
+ unsigned long flags;
+
+ /* the STI hardware buffers the 48-bit count, but to
+ * break it out into two 32-bit accesses the registers
+ * must be accessed in a certain order.
+ * Always read STI_COUNT_H before STI_COUNT_L.
+ */
+ raw_spin_lock_irqsave(&p->lock, flags);
+ ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+ ticks |= em_sti_read(p, STI_COUNT_L);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return ticks;
+}
+
+static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&p->lock, flags);
+
+ /* mask compare A interrupt */
+ em_sti_write(p, STI_INTENCLR, 1);
+
+ /* update compare A value */
+ em_sti_write(p, STI_COMPA_H, next >> 32);
+ em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
+
+ /* clear compare A interrupt source */
+ em_sti_write(p, STI_INTFFCLR, 1);
+
+ /* unmask compare A interrupt */
+ em_sti_write(p, STI_INTENSET, 1);
+
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return next;
+}
+
+static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
+{
+ struct em_sti_priv *p = dev_id;
+
+ p->ced.event_handler(&p->ced);
+ return IRQ_HANDLED;
+}
+
+static int em_sti_start(struct em_sti_priv *p, unsigned int user)
+{
+ unsigned long flags;
+ int used_before;
+ int ret = 0;
+
+ raw_spin_lock_irqsave(&p->lock, flags);
+ used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+ if (!used_before)
+ ret = em_sti_enable(p);
+
+ if (!ret)
+ p->active[user] = 1;
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return ret;
+}
+
+static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
+{
+ unsigned long flags;
+ int used_before, used_after;
+
+ raw_spin_lock_irqsave(&p->lock, flags);
+ used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+ p->active[user] = 0;
+ used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+
+ if (used_before && !used_after)
+ em_sti_disable(p);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
+{
+ return container_of(cs, struct em_sti_priv, cs);
+}
+
+static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+{
+ return em_sti_count(cs_to_em_sti(cs));
+}
+
+static int em_sti_clocksource_enable(struct clocksource *cs)
+{
+ int ret;
+ struct em_sti_priv *p = cs_to_em_sti(cs);
+
+ ret = em_sti_start(p, USER_CLOCKSOURCE);
+ if (!ret)
+ __clocksource_updatefreq_hz(cs, p->rate);
+ return ret;
+}
+
+static void em_sti_clocksource_disable(struct clocksource *cs)
+{
+ em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
+}
+
+static void em_sti_clocksource_resume(struct clocksource *cs)
+{
+ em_sti_clocksource_enable(cs);
+}
+
+static int em_sti_register_clocksource(struct em_sti_priv *p)
+{
+ struct clocksource *cs = &p->cs;
+
+ memset(cs, 0, sizeof(*cs));
+ cs->name = dev_name(&p->pdev->dev);
+ cs->rating = 200;
+ cs->read = em_sti_clocksource_read;
+ cs->enable = em_sti_clocksource_enable;
+ cs->disable = em_sti_clocksource_disable;
+ cs->suspend = em_sti_clocksource_disable;
+ cs->resume = em_sti_clocksource_resume;
+ cs->mask = CLOCKSOURCE_MASK(48);
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ dev_info(&p->pdev->dev, "used as clock source\n");
+
+ /* Register with dummy 1 Hz value, gets updated in ->enable() */
+ clocksource_register_hz(cs, 1);
+ return 0;
+}
+
+static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
+{
+ return container_of(ced, struct em_sti_priv, ced);
+}
+
+static void em_sti_clock_event_mode(enum clock_event_mode mode,
+ struct clock_event_device *ced)
+{
+ struct em_sti_priv *p = ced_to_em_sti(ced);
+
+ /* deal with old setting first */
+ switch (ced->mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ em_sti_stop(p, USER_CLOCKEVENT);
+ break;
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+ em_sti_start(p, USER_CLOCKEVENT);
+ clockevents_config(&p->ced, p->rate);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ em_sti_stop(p, USER_CLOCKEVENT);
+ break;
+ default:
+ break;
+ }
+}
+
+static int em_sti_clock_event_next(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct em_sti_priv *p = ced_to_em_sti(ced);
+ cycle_t next;
+ int safe;
+
+ next = em_sti_set_next(p, em_sti_count(p) + delta);
+ safe = em_sti_count(p) < (next - 1);
+
+ return !safe;
+}
+
+static void em_sti_register_clockevent(struct em_sti_priv *p)
+{
+ struct clock_event_device *ced = &p->ced;
+
+ memset(ced, 0, sizeof(*ced));
+ ced->name = dev_name(&p->pdev->dev);
+ ced->features = CLOCK_EVT_FEAT_ONESHOT;
+ ced->rating = 200;
+ ced->cpumask = cpumask_of(0);
+ ced->set_next_event = em_sti_clock_event_next;
+ ced->set_mode = em_sti_clock_event_mode;
+
+ dev_info(&p->pdev->dev, "used for clock events\n");
+
+ /* Register with dummy 1 Hz value, gets updated in ->set_mode() */
+ clockevents_config_and_register(ced, 1, 2, 0xffffffff);
+}
+
+static int __devinit em_sti_probe(struct platform_device *pdev)
+{
+ struct em_sti_priv *p;
+ struct resource *res;
+ int irq, ret;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ p->pdev = pdev;
+ platform_set_drvdata(pdev, p);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ /* map memory, let base point to the STI instance */
+ p->base = ioremap_nocache(res->start, resource_size(res));
+ if (p->base == NULL) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto err0;
+ }
+
+ /* get hold of clock */
+ p->clk = clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(p->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ if (request_irq(irq, em_sti_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&pdev->dev), p)) {
+ dev_err(&pdev->dev, "failed to request low IRQ\n");
+ ret = -ENOENT;
+ goto err2;
+ }
+
+ raw_spin_lock_init(&p->lock);
+ em_sti_register_clockevent(p);
+ em_sti_register_clocksource(p);
+ return 0;
+
+err2:
+ clk_put(p->clk);
+err1:
+ iounmap(p->base);
+err0:
+ kfree(p);
+ return ret;
+}
+
+static int __devexit em_sti_remove(struct platform_device *pdev)
+{
+ return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
+ { .compatible = "renesas,em-sti", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
+
+static struct platform_driver em_sti_device_driver = {
+ .probe = em_sti_probe,
+ .remove = __devexit_p(em_sti_remove),
+ .driver = {
+ .name = "em_sti",
+ .of_match_table = em_sti_dt_ids,
+ }
+};
+
+module_platform_driver(em_sti_device_driver);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
+MODULE_LICENSE("GPL v2");
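em_sti_count() in the new driver above stitches a 48-bit tick value out of two 32-bit register reads, high half first as the in-code comment requires. The composition on its own, with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t count_h = 0x1234;	/* only the low 16 bits are significant */
	uint32_t count_l = 0x89abcdef;
	uint64_t ticks;

	ticks = (uint64_t)(count_h & 0xffff) << 32;
	ticks |= count_l;

	/* prints: ticks = 0x123489abcdef */
	printf("ticks = 0x%012llx\n", (unsigned long long)ticks);
	return 0;
}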
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 32fe9ef..98b06ba 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -48,13 +48,13 @@ struct sh_cmt_priv {
unsigned long next_match_value;
unsigned long max_match_value;
unsigned long rate;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
unsigned long total_cycles;
};
-static DEFINE_SPINLOCK(sh_cmt_lock);
+static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- spin_lock_irqsave(&sh_cmt_lock, flags);
+ raw_spin_lock_irqsave(&sh_cmt_lock, flags);
value = sh_cmt_read(p, CMSTR);
if (start)
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
value &= ~(1 << cfg->timer_bit);
sh_cmt_write(p, CMSTR, value);
- spin_unlock_irqrestore(&sh_cmt_lock, flags);
+ raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
__sh_cmt_set_next(p, delta);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(p, &p->rate);
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
__sh_cmt_set_next(p, p->max_match_value);
out:
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
return ret;
}
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
unsigned long flags;
unsigned long f;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
p->flags &= ~flag;
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
__sh_cmt_set_next(p, p->max_match_value);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
unsigned long value;
int has_wrapped;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
value = p->total_cycles;
raw = sh_cmt_get_counter(p, &has_wrapped);
if (unlikely(has_wrapped))
raw += p->match_value + 1;
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
return value + raw;
}
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
p->max_match_value = (1 << p->width) - 1;
p->match_value = p->max_match_value;
- spin_lock_init(&p->lock);
+ raw_spin_lock_init(&p->lock);
if (clockevent_rating)
sh_cmt_register_clockevent(p, name, clockevent_rating);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index a2172f6..d9b76ca 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -43,7 +43,7 @@ struct sh_mtu2_priv {
struct clock_event_device ced;
};
-static DEFINE_SPINLOCK(sh_mtu2_lock);
+static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- spin_lock_irqsave(&sh_mtu2_lock, flags);
+ raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
value = sh_mtu2_read(p, TSTR);
if (start)
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
value &= ~(1 << cfg->timer_bit);
sh_mtu2_write(p, TSTR, value);
- spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+ raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
}
static int sh_mtu2_enable(struct sh_mtu2_priv *p)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 97f54b6..c1b51d4 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -45,7 +45,7 @@ struct sh_tmu_priv {
struct clocksource cs;
};
-static DEFINE_SPINLOCK(sh_tmu_lock);
+static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- spin_lock_irqsave(&sh_tmu_lock, flags);
+ raw_spin_lock_irqsave(&sh_tmu_lock, flags);
value = sh_tmu_read(p, TSTR);
if (start)
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
value &= ~(1 << cfg->timer_bit);
sh_tmu_write(p, TSTR, value);
- spin_unlock_irqrestore(&sh_tmu_lock, flags);
+ raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}
static int sh_tmu_enable(struct sh_tmu_priv *p)
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
sh_tmu_enable(p);
- /* TODO: calculate good shift from rate and counter bit width */
-
- ced->shift = 32;
- ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
- ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
- ced->min_delta_ns = 5000;
+ clockevents_config(ced, p->rate);
if (periodic) {
p->periodic = (p->rate + HZ/2) / HZ;
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
ced->set_mode = sh_tmu_clock_event_mode;
dev_info(&p->pdev->dev, "used for clock events\n");
- clockevents_register_device(ced);
+
+ clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index dde6a0f..116cf8d 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -101,19 +101,19 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
if (!skb)
return -ENOMEM;
- nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh));
+ nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
- data = NLMSG_DATA(nlh);
+ data = nlmsg_data(nlh);
memcpy(data, msg, sizeof(*data) + msg->len);
NETLINK_CB(skb).dst_group = group;
return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
-
-nlmsg_failure:
- kfree_skb(skb);
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
@@ -251,15 +251,20 @@ static const struct file_operations cn_file_ops = {
.release = single_release
};
+static struct cn_dev cdev = {
+ .input = cn_rx_skb,
+};
+
static int __devinit cn_init(void)
{
struct cn_dev *dev = &cdev;
-
- dev->input = cn_rx_skb;
+ struct netlink_kernel_cfg cfg = {
+ .groups = CN_NETLINK_USERS + 0xf,
+ .input = dev->input,
+ };
dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
- CN_NETLINK_USERS + 0xf,
- dev->input, NULL, THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!dev->nls)
return -EIO;
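The connector change above replaces the old NLMSG_PUT() macro, whose failure path jumped to a hidden nlmsg_failure label, with an explicit check of nlmsg_put(). A rough sketch of that checked pattern as it might appear in a module (the function name and parameters are hypothetical):

#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/netlink.h>

static struct sk_buff *example_build_msg(const void *payload, size_t len, u32 seq)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, 0, seq, NLMSG_DONE, len, 0);
	if (!nlh) {		/* no hidden goto: handle the failure explicitly */
		kfree_skb(skb);
		return NULL;
	}

	memcpy(nlmsg_data(nlh), payload, len);
	return skb;
}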
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index e23dc82..7212961 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1626,4 +1626,4 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fb4f499..1dc2a4a 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -815,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
init_completion(&sdmac->done);
- sdmac->buf_tail = 0;
-
return 0;
out:
@@ -927,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
sdmac->flags = 0;
+ sdmac->buf_tail = 0;
+
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
sg_len, channel);
@@ -1027,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
+ sdmac->buf_tail = 0;
+
sdmac->flags |= IMX_DMA_SG_LOOP;
sdmac->direction = direction;
ret = sdma_load_context(sdmac);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cbcc28e..e4feba6 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -392,6 +392,8 @@ struct pl330_req {
struct pl330_reqcfg *cfg;
/* Pointer to first xfer in the request. */
struct pl330_xfer *x;
+ /* Hook to attach to DMAC's list of reqs with due callback */
+ struct list_head rqd;
};
/*
@@ -461,8 +463,6 @@ struct _pl330_req {
/* Number of bytes taken to setup MC for the req */
u32 mc_len;
struct pl330_req *r;
- /* Hook to attach to DMAC's list of reqs with due callback */
- struct list_head rqd;
};
/* ToBeDone for tasklet */
@@ -1683,7 +1683,7 @@ static void pl330_dotask(unsigned long data)
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
- struct _pl330_req *rqdone;
+ struct pl330_req *rqdone, *tmp;
struct pl330_dmac *pl330;
unsigned long flags;
void __iomem *regs;
@@ -1750,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi)
if (active == -1) /* Aborted */
continue;
- rqdone = &thrd->req[active];
+ /* Detach the req */
+ rqdone = thrd->req[active].r;
+ thrd->req[active].r = NULL;
+
mark_free(thrd, active);
/* Get going again ASAP */
@@ -1762,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi)
}
/* Now that we are in no hurry, do the callbacks */
- while (!list_empty(&pl330->req_done)) {
- struct pl330_req *r;
-
- rqdone = container_of(pl330->req_done.next,
- struct _pl330_req, rqd);
-
- list_del_init(&rqdone->rqd);
-
- /* Detach the req */
- r = rqdone->r;
- rqdone->r = NULL;
+ list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
+ list_del(&rqdone->rqd);
spin_unlock_irqrestore(&pl330->lock, flags);
- _callback(r, PL330_ERR_NONE);
+ _callback(rqdone, PL330_ERR_NONE);
spin_lock_irqsave(&pl330->lock, flags);
}
@@ -2321,7 +2315,7 @@ static void pl330_tasklet(unsigned long data)
/* Pick up ripe tomatoes */
list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
if (desc->status == DONE) {
- if (pch->cyclic)
+ if (!pch->cyclic)
dma_cookie_complete(&desc->txd);
list_move_tail(&desc->node, &list);
}
@@ -2539,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
}
/* Returns the number of descriptors added to the DMAC pool */
-int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
struct dma_pl330_desc *desc;
unsigned long flags;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 10f3750..de5ba86e 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -164,7 +164,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
else
return (char *)ptr;
- r = size % align;
+ r = (unsigned long)p % align;
if (r == 0)
return (char *)ptr;
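The edac_align_ptr() fix above takes the alignment remainder from an address rather than from the element size. A generic version of that arithmetic, with a made-up address and alignment:

#include <stdio.h>
#include <stdint.h>

static uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
	uintptr_t r = addr % align;

	/* bump the address to the next multiple of align, if needed */
	return r ? addr + (align - r) : addr;
}

int main(void)
{
	/* prints: 0x1008 */
	printf("0x%lx\n", (unsigned long)align_up(0x1003, 8));
	return 0;
}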
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index d27778f..a499c7e 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1814,12 +1814,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->bank != 8)
return NOTIFY_DONE;
-#ifdef CONFIG_SMP
- /* Only handle if it is the right mc controller */
- if (mce->socketid != pvt->i7core_dev->socket)
- return NOTIFY_DONE;
-#endif
-
smp_rmb();
if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
smp_wmb();
@@ -2116,8 +2110,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
if (pvt->enable_scrub)
disable_sdram_scrub_setting(mci);
- mce_unregister_decode_chain(&i7_mce_dec);
-
/* Disable EDAC polling */
i7core_pci_ctl_release(pvt);
@@ -2222,8 +2214,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* DCLK for scrub rate setting */
pvt->dclk_freq = get_dclk_freq();
- mce_register_decode_chain(&i7_mce_dec);
-
return 0;
fail0:
@@ -2367,8 +2357,10 @@ static int __init i7core_init(void)
pci_rc = pci_register_driver(&i7core_driver);
- if (pci_rc >= 0)
+ if (pci_rc >= 0) {
+ mce_register_decode_chain(&i7_mce_dec);
return 0;
+ }
i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
pci_rc);
@@ -2384,6 +2376,7 @@ static void __exit i7core_exit(void)
{
debugf2("MC: " __FILE__ ": %s()\n", __func__);
pci_unregister_driver(&i7core_driver);
+ mce_unregister_decode_chain(&i7_mce_dec);
}
module_init(i7core_init);
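
The i7core hunks move mce_register_decode_chain() out of the per-socket register/unregister paths and into module init/exit, registering the notifier only once pci_register_driver() has succeeded. A sketch of that shape (hypothetical helpers standing in for the PCI and MCE calls):

#include <stdio.h>

static int register_driver(void)     { puts("driver registered");     return 0; }
static void unregister_driver(void)  { puts("driver unregistered");   }
static void register_notifier(void)  { puts("notifier registered");   }
static void unregister_notifier(void){ puts("notifier unregistered"); }

static int module_init_sketch(void)
{
	int rc = register_driver();

	if (rc >= 0) {
		register_notifier();	/* only once the primary registration worked */
		return 0;
	}
	fprintf(stderr, "driver registration failed: %d\n", rc);
	return rc;
}

static void module_exit_sketch(void)
{
	unregister_driver();		/* same order as the patch's exit path */
	unregister_notifier();
}

int main(void)
{
	if (module_init_sketch() == 0)
		module_exit_sketch();
	return 0;
}
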
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4c40235..0e37462 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -980,7 +980,8 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
layers[1].type = EDAC_MC_LAYER_CHANNEL;
layers[1].size = 1;
layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata));
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(*pdata));
if (!mci) {
devres_release_group(&op->dev, mpc85xx_mc_err_probe);
return -ENOMEM;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4adaf4b..36ad17e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -555,7 +555,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
pvt->is_close_pg = false;
}
- pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg);
+ pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
if (IS_RDIMM_ENABLED(reg)) {
/* FIXME: Can also be LRDIMM */
debugf0("Memory is registered\n");
@@ -1604,8 +1604,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
__func__, mci, &sbridge_dev->pdev[0]->dev);
- mce_unregister_decode_chain(&sbridge_mce_dec);
-
/* Remove MC sysfs nodes */
edac_mc_del_mc(mci->dev);
@@ -1682,7 +1680,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
goto fail0;
}
- mce_register_decode_chain(&sbridge_mce_dec);
return 0;
fail0:
@@ -1811,8 +1808,10 @@ static int __init sbridge_init(void)
pci_rc = pci_register_driver(&sbridge_driver);
- if (pci_rc >= 0)
+ if (pci_rc >= 0) {
+ mce_register_decode_chain(&sbridge_mce_dec);
return 0;
+ }
sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
pci_rc);
@@ -1828,6 +1827,7 @@ static void __exit sbridge_exit(void)
{
debugf2("MC: " __FILE__ ": %s()\n", __func__);
pci_unregister_driver(&sbridge_driver);
+ mce_unregister_decode_chain(&sbridge_mce_dec);
}
module_init(sbridge_init);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 23416e4..a4ed30b 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -116,8 +116,8 @@ const char *max8997_extcon_cable[] = {
[5] = "Charge-downstream",
[6] = "MHL",
[7] = "Dock-desk",
- [7] = "Dock-card",
- [8] = "JIG",
+ [8] = "Dock-card",
+ [9] = "JIG",
NULL,
};
@@ -514,6 +514,7 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev)
extcon_dev_unregister(info->edev);
+ kfree(info->edev);
kfree(info);
return 0;
diff --git a/drivers/extcon/extcon_class.c b/drivers/extcon/extcon_class.c
index f598a70..159aeb0 100644
--- a/drivers/extcon/extcon_class.c
+++ b/drivers/extcon/extcon_class.c
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
#if defined(CONFIG_ANDROID)
if (switch_class)
ret = class_compat_create_link(switch_class, edev->dev,
- dev);
+ NULL);
#endif /* CONFIG_ANDROID */
spin_lock_init(&edev->lock);
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c
index fe7a07b..8a0dcc1 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon_gpio.c
@@ -125,6 +125,7 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
if (ret < 0)
goto err_request_irq;
+ platform_set_drvdata(pdev, extcon_data);
/* Perform initial detection */
gpio_extcon_work(&extcon_data->work.work);
@@ -146,6 +147,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&extcon_data->work);
+ free_irq(extcon_data->irq, extcon_data);
gpio_free(extcon_data->gpio);
extcon_dev_unregister(&extcon_data->edev);
devm_kfree(&pdev->dev, extcon_data);
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 7bb0044..b6453d0 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void)
}
/* need to set base address for gpc4 */
- exonys5_gpios_1[11].base = gpio_base1 + 0x2E0;
+ exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
/* need to set base address for gpx */
chip = &exynos5_gpios_1[21];
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index eb92fe2..a8743c3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -610,7 +610,7 @@ static bool
drm_monitor_supports_rb(struct edid *edid)
{
if (edid->revision >= 4) {
- bool ret;
+ bool ret = false;
drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
return ret;
}
@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
return true;
}
+static bool valid_inferred_mode(const struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+ bool ok = false;
+
+ list_for_each_entry(m, &connector->probed_modes, head) {
+ if (mode->hdisplay == m->hdisplay &&
+ mode->vdisplay == m->vdisplay &&
+ drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+ return false; /* duplicated */
+ if (mode->hdisplay <= m->hdisplay &&
+ mode->vdisplay <= m->vdisplay)
+ ok = true;
+ }
+ return ok;
+}
+
static int
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+ valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
fixup_mode_1366x768(newmode);
- if (!mode_in_range(newmode, edid, timing)) {
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
fixup_mode_1366x768(newmode);
- if (!mode_in_range(newmode, edid, timing)) {
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 4209531..d6de2e07f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = {
};
static struct drm_driver exynos_drm_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+ DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
.open = exynos_drm_open,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 6e9ac7b..23d5ad3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
manager_ops->commit(manager->dev);
}
-static struct drm_crtc *
-exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
- return encoder->crtc;
-}
-
static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
.dpms = exynos_drm_encoder_dpms,
.mode_fixup = exynos_drm_encoder_mode_fixup,
.mode_set = exynos_drm_encoder_mode_set,
.prepare = exynos_drm_encoder_prepare,
.commit = exynos_drm_encoder_commit,
- .get_crtc = exynos_drm_encoder_get_crtc,
};
static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index f82a299..4ccfe43 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -51,11 +51,22 @@ struct exynos_drm_fb {
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ unsigned int i;
DRM_DEBUG_KMS("%s\n", __FILE__);
drm_framebuffer_cleanup(fb);
+ for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
+ struct drm_gem_object *obj;
+
+ if (exynos_fb->exynos_gem_obj[i] == NULL)
+ continue;
+
+ obj = &exynos_fb->exynos_gem_obj[i]->base;
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
kfree(exynos_fb);
exynos_fb = NULL;
}
@@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- drm_gem_object_unreference_unlocked(obj);
-
fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb))
+ if (IS_ERR(fb)) {
+ drm_gem_object_unreference_unlocked(obj);
return fb;
+ }
exynos_fb = to_exynos_fb(fb);
nr = exynos_drm_format_num_buffers(fb->pixel_format);
@@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- drm_gem_object_unreference_unlocked(obj);
-
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
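
The exynos framebuffer hunks stop dropping the GEM reference right after lookup: the framebuffer keeps that reference for its whole lifetime and releases it in exynos_drm_fb_destroy(), dropping it early only on the error path. A tiny reference-counting sketch of that ownership transfer (hypothetical types, not the DRM GEM API):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
};

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("object freed\n");
		free(o);
	}
}

struct fb {
	struct obj *backing;		/* the fb owns one reference */
};

static struct fb *fb_create(struct obj *o)
{
	struct fb *fb = malloc(sizeof(*fb));

	if (!fb)
		return NULL;		/* caller still owns its lookup reference */
	fb->backing = o;		/* ownership of the reference moves to the fb */
	return fb;
}

static void fb_destroy(struct fb *fb)
{
	obj_put(fb->backing);		/* drop it only when the fb goes away */
	free(fb);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	struct fb *fb;

	if (!o)
		return 1;
	o->refcount = 1;		/* reference obtained by the lookup */

	fb = fb_create(o);
	if (!fb) {
		obj_put(o);		/* error path: drop the lookup reference */
		return 1;
	}
	fb_destroy(fb);			/* the fb drops the reference at teardown */
	return 0;
}
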
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 3ecb30d..5082375 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -31,10 +31,10 @@
static inline int exynos_drm_format_num_buffers(uint32_t format)
{
switch (format) {
- case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12:
case DRM_FORMAT_NV12MT:
return 2;
- case DRM_FORMAT_YUV420M:
+ case DRM_FORMAT_YUV420:
return 3;
default:
return 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index fc91293..5c8b683 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -689,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
int ret = 0;
@@ -710,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- if (!exynos_gem_obj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+ if (!obj->map_list.map) {
+ ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 68ef010..e2147a2 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
switch (win_data->pixel_format) {
case DRM_FORMAT_NV12MT:
tiled_mode = true;
- case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12:
crcb_mode = false;
buf_num = 2;
break;
@@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx)
mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
/* setting graphical layers */
-
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
/* the same configuration for both layers */
mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
-
- val |= MXR_GRP_CFG_BLEND_PRE_MUL;
- val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+ /* setting video layers */
+ val = MXR_GRP_CFG_ALPHA_VAL(0);
+ mixer_reg_write(res, MXR_VIDEO_CFG, val);
+
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
vp_default_filter(res);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f947926..36822b9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
}
}
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+ struct apertures_struct *ap;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
+ bool primary;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ ap->ranges[0].base = dev_priv->dev->agp->base;
+ ap->ranges[0].size =
+ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ primary =
+ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+ remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+ kfree(ap);
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
+ goto put_bridge;
+ }
+
+ i915_kick_out_firmware_fb(dev_priv);
+
pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto out_rmmap;
- }
-
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_mapping =
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 238a521..9fe9ebe 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -233,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -243,6 +244,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -252,6 +254,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -262,6 +265,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_valleyview_m_info = {
@@ -289,6 +293,7 @@ static const struct intel_device_info intel_haswell_d_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_haswell_m_info = {
@@ -298,6 +303,7 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -1139,10 +1145,9 @@ MODULE_LICENSE("GPL and additional rights");
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE)) && \
- (!IS_VALLEYVIEW((dev_priv)->dev))
+ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c9cfc67..b0b676a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -285,6 +285,7 @@ struct intel_device_info {
u8 is_ivybridge:1;
u8 is_valleyview:1;
u8 has_pch_split:1;
+ u8 has_force_wake:1;
u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
@@ -1101,6 +1102,8 @@ struct drm_i915_file_private {
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
#include "i915_trace.h"
/**
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1417660..ed3224c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -412,7 +412,6 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
*/
spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
dev_priv->pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
POSTING_READ(GEN6_PMIMR);
@@ -510,7 +509,7 @@ out:
return ret;
}
-static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -550,6 +549,35 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+ SDE_AUDIO_POWER_SHIFT_CPT);
+
+ if (pch_iir & SDE_AUX_MASK_CPT)
+ DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+
+ if (pch_iir & SDE_GMBUS_CPT)
+ DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+ DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+ DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK_CPT)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+}
+
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -591,7 +619,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
if (pch_iir & SDE_HOTPLUG_MASK_CPT)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev, pch_iir);
+ cpt_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -684,7 +712,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev, pch_iir);
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
}
if (de_iir & DE_PCU_EVENT) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2d49b95..48d5e8e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -210,6 +210,14 @@
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -3313,7 +3321,7 @@
/* PCH */
-/* south display engine interrupt */
+/* south display engine interrupt: IBX */
#define SDE_AUDIO_POWER_D (1 << 27)
#define SDE_AUDIO_POWER_C (1 << 26)
#define SDE_AUDIO_POWER_B (1 << 25)
@@ -3349,15 +3357,44 @@
#define SDE_TRANSA_CRC_ERR (1 << 1)
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
-/* CPT */
-#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0ede02a..a748e5c 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -740,8 +740,11 @@ static void i915_restore_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+ * otherwise we get a blank eDP screen after S3 on some machines
+ */
I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 91478942..a8538ac 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6158,17 +6158,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ uint32_t plane_bit = 0;
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
+ switch (intel_crtc->plane) {
+ case PLANE_A:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+ break;
+ case PLANE_B:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+ break;
+ case PLANE_C:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+ break;
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
@@ -6541,7 +6558,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(HDMIC) & PORT_DETECTED)
intel_hdmi_init(dev, HDMIC);
- if (I915_READ(HDMID) & PORT_DETECTED)
+ if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
intel_hdmi_init(dev, HDMID);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
@@ -6904,19 +6921,6 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
-static void ivb_pch_pwm_override(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * IVB has CPU eDP backlight regs too, set things up to let the
- * PCH regs control the backlight
- */
- I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
- I915_WRITE(BLC_PWM_CPU_CTL, 0);
- I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
-}
-
void intel_modeset_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6933,9 +6937,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
gen6_enable_rps(dev_priv);
gen6_update_ring_freq(dev_priv);
}
-
- if (IS_IVYBRIDGE(dev))
- ivb_pch_pwm_override(dev);
}
void intel_modeset_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 296cfc2..c044932 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -32,6 +32,7 @@
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -67,6 +68,8 @@ struct intel_dp {
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ struct edid *edid; /* cached EDID for eDP */
+ int edid_mode_count;
};
/**
@@ -371,7 +374,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
- int try, precharge = 5;
+ int try, precharge;
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
@@ -391,6 +394,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
aux_clock_divider = intel_hrawclk(dev) / 2;
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ(ch_ctl);
@@ -1973,6 +1981,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
+ ironlake_edp_panel_vdd_on(intel_dp);
+
if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
@@ -1980,6 +1990,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
+
+ ironlake_edp_panel_vdd_off(intel_dp, false);
}
static bool
@@ -2116,10 +2128,22 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct edid *edid;
+ int size;
+
+ if (is_edp(intel_dp)) {
+ if (!intel_dp->edid)
+ return NULL;
+
+ size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+ edid = kmalloc(size, GFP_KERNEL);
+ if (!edid)
+ return NULL;
+
+ memcpy(edid, intel_dp->edid, size);
+ return edid;
+ }
- ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
return edid;
}
@@ -2129,9 +2153,17 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
- ironlake_edp_panel_vdd_on(intel_dp);
+ if (is_edp(intel_dp)) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_dp->edid);
+ ret = drm_add_edid_modes(connector, intel_dp->edid);
+ drm_edid_to_eld(connector,
+ intel_dp->edid);
+ connector->display_info.raw_edid = NULL;
+ return intel_dp->edid_mode_count;
+ }
+
ret = intel_ddc_get_modes(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
@@ -2321,6 +2353,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
+ kfree(intel_dp->edid);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
@@ -2504,11 +2537,14 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
bool ret;
struct edp_power_seq cur, vbt;
u32 pp_on, pp_off, pp_div;
+ struct edid *edid;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2576,9 +2612,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp_destroy(&intel_connector->base);
return;
}
- }
- intel_dp_i2c_init(intel_dp, intel_connector, name);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ intel_dp->edid_mode_count =
+ drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ intel_dp->edid = edid;
+ }
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ }
intel_encoder->hot_plug = intel_dp_hot_plug;
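
The intel_dp changes read the eDP panel's EDID once at init time, while VDD is forced on, and afterwards serve a copy of the cached blob instead of hitting the AUX channel again. A minimal sketch of the cache-then-copy idea (illustrative struct and helpers, not the i915 code):

#include <stdlib.h>
#include <string.h>

struct panel {
	void  *edid;		/* blob read once at init time */
	size_t edid_len;
};

/* Cache a private copy of the probed blob. */
static int panel_cache_edid(struct panel *p, const void *blob, size_t len)
{
	p->edid = malloc(len);
	if (!p->edid)
		return -1;
	memcpy(p->edid, blob, len);
	p->edid_len = len;
	return 0;
}

/* Return a heap copy so the caller can free it independently of the cache,
 * mirroring how the patch duplicates the cached EDID for each request. */
static void *panel_get_edid(const struct panel *p, size_t *len)
{
	void *copy;

	if (!p->edid)
		return NULL;
	copy = malloc(p->edid_len);
	if (!copy)
		return NULL;
	memcpy(copy, p->edid, p->edid_len);
	*len = p->edid_len;
	return copy;
}

int main(void)
{
	static const unsigned char probed[128];	/* stand-in for a real EDID */
	struct panel p = { 0 };
	size_t len;
	void *copy;

	if (panel_cache_edid(&p, probed, sizeof(probed)))
		return 1;
	copy = panel_get_edid(&p, &len);
	free(copy);
	free(p.edid);
	return 0;
}

Handing out a copy keeps the cached blob safe if the caller frees what it was given, which is why the patch duplicates the EDID rather than returning the cached pointer.
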
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b59b6d5..e5b84ff 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -266,10 +266,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
static int init_ring_common(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
u32 head;
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_get(dev_priv);
+
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
@@ -317,7 +322,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -326,9 +332,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
ring->head = I915_READ_HEAD(ring);
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring_space(ring);
+ ring->last_retired_head = -1;
}
- return 0;
+out:
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_put(dev_priv);
+
+ return ret;
}
static int
@@ -987,6 +998,10 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unref;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+
ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
ring->size);
if (ring->virtual_start == NULL) {
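
init_ring_common() above now takes a force-wake reference before touching the ring registers and funnels every exit, including the -EIO failure, through a single out: label so the reference is always dropped. A small sketch of that acquire/goto-out/release shape (stand-in helpers, not the gen6 force-wake code):

#include <stdio.h>

static int resource_held;

static void resource_get(void) { resource_held = 1; }
static void resource_put(void) { resource_held = 0; }

/* Do some work that may fail part-way through. */
static int do_hw_init(int simulate_failure)
{
	int ret = 0;

	resource_get();			/* e.g. gen6_gt_force_wake_get() */

	if (simulate_failure) {
		fprintf(stderr, "init failed\n");
		ret = -5;		/* -EIO-style error */
		goto out;		/* never return without the put */
	}

	/* ... normal initialisation would go here ... */

out:
	resource_put();			/* e.g. gen6_gt_force_wake_put() */
	return ret;
}

int main(void)
{
	printf("ok path:  %d\n", do_hw_init(0));
	printf("err path: %d (held=%d)\n", do_hw_init(1), resource_held);
	return 0;
}
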
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 153b9a1..1074bc5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -467,7 +467,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
ret = drm_fb_helper_init(dev, &nfbdev->helper,
- nv_two_heads(dev) ? 2 : 1, 4);
+ dev->mode_config.num_crtc, 4);
if (ret) {
kfree(nfbdev);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index a89240e..a25cf2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
#include "drmP.h"
#include "drm.h"
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 01d77d1..3904d79 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1149,7 +1149,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
}
if (tiling_flags & RADEON_TILING_MACRO) {
- if (rdev->family >= CHIP_CAYMAN)
+ if (rdev->family >= CHIP_TAHITI)
+ tmp = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
@@ -1177,6 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
} else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+ if ((rdev->family == CHIP_TAHITI) ||
+ (rdev->family == CHIP_PITCAIRN))
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+ else if (rdev->family == CHIP_VERDE)
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(AVIVO_D1VGA_CONTROL, 0);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e7b1ec5..486ccdf 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1926,7 +1926,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE6(rdev))
+ ; /* TODO (use pointers instead of if-s?) */
+ else if (ASIC_IS_DCE4(rdev))
evergreen_hdmi_setmode(encoder, adjusted_mode);
else
r600_hdmi_setmode(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 01550d0..7fb3d2e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1932,6 +1932,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+ if (rdev->family <= CHIP_SUMO2)
+ WREG32(SMX_SAR_CTL0, 0x00010000);
+
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 4e7dd2b..c1655412 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -52,6 +52,7 @@ struct evergreen_cs_track {
u32 cb_color_view[12];
u32 cb_color_pitch[12];
u32 cb_color_slice[12];
+ u32 cb_color_slice_idx[12];
u32 cb_color_attrib[12];
u32 cb_color_cmask_slice[8];/* unused */
u32 cb_color_fmask_slice[8];/* unused */
@@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
track->cb_color_info[i] = 0;
track->cb_color_view[i] = 0xFFFFFFFF;
track->cb_color_pitch[i] = 0;
- track->cb_color_slice[i] = 0;
+ track->cb_color_slice[i] = 0xfffffff;
+ track->cb_color_slice_idx[i] = 0;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
track->cb_dirty = true;
+ track->db_depth_slice = 0xffffffff;
track->db_depth_view = 0xFFFFC000;
track->db_depth_size = 0xFFFFFFFF;
track->db_depth_control = 0xFFFFFFFF;
@@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
{
struct evergreen_cs_track *track = p->track;
unsigned palign, halign, tileb, slice_pt;
+ unsigned mtile_pr, mtile_ps, mtileb;
tileb = 64 * surf->bpe * surf->nsamples;
- palign = track->group_size / (8 * surf->bpe * surf->nsamples);
- palign = MAX(8, palign);
slice_pt = 1;
if (tileb > surf->tsplit) {
slice_pt = tileb / surf->tsplit;
@@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
/* macro tile width & height */
palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
- surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
+ mtileb = (palign / 8) * (halign / 8) * tileb;;
+ mtile_pr = surf->nbx / palign;
+ mtile_ps = (mtile_pr * surf->nby) / halign;
+ surf->layer_size = mtile_ps * mtileb * slice_pt;
surf->base_align = (palign / 8) * (halign / 8) * tileb;
surf->palign = palign;
surf->halign = halign;
@@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
offset += surf.layer_size * mslice;
if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+ /* old ddx are broken: they allocate the bo with w*h*bpp but
+ * program the slice with ALIGN(h, 8); catch this and patch
+ * the command stream.
+ */
+ if (!surf.mode) {
+ volatile u32 *ib = p->ib.ptr;
+ unsigned long tmp, nby, bsize, size, min = 0;
+
+ /* find the height the ddx wants */
+ if (surf.nby > 8) {
+ min = surf.nby - 8;
+ }
+ bsize = radeon_bo_size(track->cb_color_bo[id]);
+ tmp = track->cb_color_bo_offset[id] << 8;
+ for (nby = surf.nby; nby > min; nby--) {
+ size = nby * surf.nbx * surf.bpe * surf.nsamples;
+ if ((tmp + size * mslice) <= bsize) {
+ break;
+ }
+ }
+ if (nby > min) {
+ surf.nby = nby;
+ slice = ((nby * surf.nbx) / 64) - 1;
+ if (!evergreen_surface_check(p, &surf, "cb")) {
+ /* check if this one works */
+ tmp += surf.layer_size * mslice;
+ if (tmp <= bsize) {
+ ib[track->cb_color_slice_idx[id]] = slice;
+ goto old_ddx_ok;
+ }
+ }
+ }
+ }
dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
"offset %d, max layer %d, bo size %ld, slice %d)\n",
__func__, __LINE__, id, surf.layer_size,
@@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
surf.tsplit, surf.mtilea);
return -EINVAL;
}
+old_ddx_ok:
return 0;
}
@@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_SLICE:
tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
track->cb_dirty = true;
break;
case CB_COLOR8_SLICE:
@@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_SLICE:
tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
track->cb_dirty = true;
break;
case CB_COLOR0_ATTRIB:
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index a51f880..65c5416 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -156,9 +156,6 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
- if (ASIC_IS_DCE5(rdev))
- return;
-
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 2773039..b50b15c 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -503,6 +503,7 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
+#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define NUMBER_OF_SETS(x) ((x) << 1)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 3df4efa..b7bf18e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -460,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.max_pipes_per_simd = 4;
rdev->config.cayman.max_tile_pipes = 2;
if ((rdev->pdev->device == 0x9900) ||
- (rdev->pdev->device == 0x9901)) {
+ (rdev->pdev->device == 0x9901) ||
+ (rdev->pdev->device == 0x9905) ||
+ (rdev->pdev->device == 0x9906) ||
+ (rdev->pdev->device == 0x9907) ||
+ (rdev->pdev->device == 0x9908) ||
+ (rdev->pdev->device == 0x9909) ||
+ (rdev->pdev->device == 0x9910) ||
+ (rdev->pdev->device == 0x9917)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
- (rdev->pdev->device == 0x9904)) {
+ (rdev->pdev->device == 0x9904) ||
+ (rdev->pdev->device == 0x990A) ||
+ (rdev->pdev->device == 0x9913) ||
+ (rdev->pdev->device == 0x9918)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x9990) ||
- (rdev->pdev->device == 0x9991)) {
+ } else if ((rdev->pdev->device == 0x9919) ||
+ (rdev->pdev->device == 0x9990) ||
+ (rdev->pdev->device == 0x9991) ||
+ (rdev->pdev->device == 0x9994) ||
+ (rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
} else {
@@ -1290,6 +1303,10 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1316,6 +1333,7 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
radeon_ib_pool_suspend(rdev);
radeon_vm_manager_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 45cfcea..bff6272 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1839,6 +1839,7 @@ void r600_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+ WREG32(VC_ENHANCE, 0);
}
@@ -2426,6 +2427,12 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
return 0;
}
@@ -2462,12 +2469,6 @@ int r600_resume(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- DRM_ERROR("radeon: audio resume failed\n");
- return r;
- }
-
return r;
}
@@ -2577,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
- r = r600_audio_init(rdev);
- if (r)
- return r; /* TODO error handling */
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 7c4fa77..79b5591 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,7 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
+ return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
int base_rate = 48000;
switch (radeon_encoder->encoder_id) {
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
- /* Some magic trigger or src sel? */
- WREG32_P(0x5ac, 0x01, ~0x77);
+ /* Select DTO source */
+ WREG32(0x5ac, radeon_crtc->crtc_id);
} else {
switch (dig->dig_encoder) {
case 0:
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0133f5f..ca87f7a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2079,6 +2079,48 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
+ case PACKET3_STRMOUT_BASE_UPDATE:
+ if (p->family < CHIP_RV770) {
+ DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+ return -EINVAL;
+ }
+ if (pkt->count != 1) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+ return -EINVAL;
+ }
+ if (idx_value > 3) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+ return -EINVAL;
+ }
+ {
+ u64 offset;
+
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+ return -EINVAL;
+ }
+
+ if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+ return -EINVAL;
+ }
+
+ offset = radeon_get_ib_value(p, idx+1) << 8;
+ if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+ offset, track->vgt_strmout_bo_offset[idx_value]);
+ return -EINVAL;
+ }
+
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
case PACKET3_SURFACE_BASE_UPDATE:
if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 226379e..82a0a4c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -322,9 +322,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
- if (ASIC_IS_DCE5(rdev))
- return;
-
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
@@ -348,7 +345,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
}
@@ -484,7 +480,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
uint32_t offset;
u32 hdmi;
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE6(rdev))
return;
/* Silent, r600_hdmi_enable will raise WARN for us */
@@ -544,7 +540,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE6(rdev))
return;
/* Called for ATOM_ENCODER_MODE_HDMI only */
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a0dbf1f..025fd5b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -485,6 +485,7 @@
#define TC_L2_SIZE(x) ((x)<<5)
#define L2_DISABLE_LATE_HIT (1<<9)
+#define VC_ENHANCE 0x9714
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
@@ -1163,6 +1164,7 @@
#define PACKET3_SET_CTL_CONST 0x6F
#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
#define PACKET3_SET_CTL_CONST_END 0x0003e200
+#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
#define PACKET3_SURFACE_BASE_UPDATE 0x73
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 85dac33..fefcca5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1374,9 +1374,9 @@ struct cayman_asic {
struct si_asic {
unsigned max_shader_engines;
- unsigned max_pipes_per_simd;
unsigned max_tile_pipes;
- unsigned max_simds_per_se;
+ unsigned max_cu_per_sh;
+ unsigned max_sh_per_se;
unsigned max_backends_per_se;
unsigned max_texture_channel_caches;
unsigned max_gprs;
@@ -1387,7 +1387,6 @@ struct si_asic {
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_size;
- unsigned num_shader_engines;
unsigned num_tile_pipes;
unsigned num_backends_per_se;
unsigned backend_disable_mask_per_asic;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f0bb2b5..2c4d53f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -57,9 +57,11 @@
* 2.13.0 - virtual memory support, streamout
* 2.14.0 - add evergreen tiling information
* 2.15.0 - add max_pipes query
+ * 2.16.0 - fix evergreen 2D tiled surface calculation
+ * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 15
+#define KMS_DRIVER_MINOR 17
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 79db56e..84b648a 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
rdev->vm_manager.enabled = false;
/* mark first vm as always in use, it's the system one */
+ /* allocate enough for 2 full VM pts */
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- rdev->vm_manager.max_pfn * 8,
+ rdev->vm_manager.max_pfn * 8 * 2,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,12 +477,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
mutex_lock(&vm->mutex);
if (last_pfn > vm->last_pfn) {
- /* grow va space 32M by 32M */
- unsigned align = ((32 << 20) >> 12) - 1;
+ /* release mutex and lock in right order */
+ mutex_unlock(&vm->mutex);
radeon_mutex_lock(&rdev->cs_mutex);
- radeon_vm_unbind_locked(rdev, vm);
+ mutex_lock(&vm->mutex);
+ /* and check again */
+ if (last_pfn > vm->last_pfn) {
+ /* grow va space 32M by 32M */
+ unsigned align = ((32 << 20) >> 12) - 1;
+ radeon_vm_unbind_locked(rdev, vm);
+ vm->last_pfn = (last_pfn + align) & ~align;
+ }
radeon_mutex_unlock(&rdev->cs_mutex);
- vm->last_pfn = (last_pfn + align) & ~align;
}
head = &vm->va;
last_offset = 0;
@@ -595,8 +602,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
if (bo_va == NULL)
return 0;
- mutex_lock(&vm->mutex);
radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&vm->mutex);
radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
radeon_mutex_unlock(&rdev->cs_mutex);
list_del(&bo_va->vm_list);
@@ -627,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
- vm->last_pfn = 0;
+ /* SI requires equal sized PTs for all VMs, so always set
+ * last_pfn to max_pfn. cayman allows variable sized
+ * pts so we can grow them as needed. Once we switch
+ * to two level pts we can unify this again.
+ */
+ if (rdev->family >= CHIP_TAHITI)
+ vm->last_pfn = rdev->vm_manager.max_pfn;
+ else
+ vm->last_pfn = 0;
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
@@ -641,9 +656,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int r;
- mutex_lock(&vm->mutex);
-
radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&vm->mutex);
radeon_vm_unbind_locked(rdev, vm);
radeon_mutex_unlock(&rdev->cs_mutex);
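
The radeon_vm_bo_add() hunk fixes the lock ordering: rather than taking cs_mutex while already holding vm->mutex, it drops vm->mutex, acquires the two locks in the documented order, and then re-checks last_pfn because another thread may have grown the range while no lock was held. A pthreads sketch of that drop, reacquire-in-order, re-check pattern (illustrative, not the radeon locking itself; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* e.g. cs_mutex  */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* e.g. vm->mutex */
static unsigned long last_pfn;

/* Called with inner held.  Growing the range needs both locks, and the
 * documented order is outer before inner, so inner is dropped first and
 * the condition is re-checked after the locks are reacquired. */
static void maybe_grow(unsigned long needed)
{
	if (needed <= last_pfn)
		return;

	pthread_mutex_unlock(&inner);	/* release, then lock in order */
	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner);

	if (needed > last_pfn) {	/* re-check: someone else may have grown it */
		last_pfn = needed;
		printf("grown to %lu\n", last_pfn);
	}

	pthread_mutex_unlock(&outer);	/* inner stays held, as on entry */
}

int main(void)
{
	pthread_mutex_lock(&inner);
	maybe_grow(42);
	pthread_mutex_unlock(&inner);
	return 0;
}
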
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f28bd4b..21ec9f5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
break;
}
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(robj->rdev, r);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
- if (robj->rdev->asic->ioctl_wait_idle)
- robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+ if (rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(robj->rdev, r);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f1016a5..5c58d7d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_MAX_PIPES:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_pipes_per_simd;
+ value = rdev->config.si.max_cu_per_sh;
else if (rdev->family >= CHIP_CAYMAN)
value = rdev->config.cayman.max_pipes_per_simd;
else if (rdev->family >= CHIP_CEDAR)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0882554..5b37e28 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -801,9 +801,13 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
int i;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- not_processed += radeon_fence_count_emitted(rdev, i);
- if (not_processed >= 3)
- break;
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (ring->ready) {
+ not_processed += radeon_fence_count_emitted(rdev, i);
+ if (not_processed >= 3)
+ break;
+ }
}
if (not_processed >= 3) { /* should upclock */
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 8ddab4c7..6bef46a 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -169,11 +169,17 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
+ ret = radeon_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ERR_PTR(ret);
+
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (ret)
+ if (ret) {
+ radeon_bo_unreserve(bo);
return ERR_PTR(ret);
-
+ }
+ radeon_bo_unreserve(bo);
return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}
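
The radeon_prime hunk brackets radeon_bo_pin() with a reserve/unreserve pair and releases the reservation on the failure path as well as on success. A compact sketch of that balanced acquire/operate/release error handling (hypothetical helpers):

#include <stdio.h>

static int reserve(void)    { puts("reserved");   return 0; }
static void unreserve(void) { puts("unreserved"); }

static int pin(int fail)
{
	if (fail) {
		puts("pin failed");
		return -22;	/* -EINVAL-style error */
	}
	puts("pinned");
	return 0;
}

/* Reserve, try to pin, and always unreserve before returning. */
static int export_buffer(int fail_pin)
{
	int ret = reserve();

	if (ret != 0)
		return ret;

	ret = pin(fail_pin);
	unreserve();		/* dropped on success and on failure alike */
	return ret;
}

int main(void)
{
	printf("ok:  %d\n", export_buffer(0));
	printf("err: %d\n", export_buffer(1));
	return 0;
}
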
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 25f9eef..e95c5e6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
- return r;
- }
-
r = radeon_ib_pool_start(rdev);
if (r)
return r;
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3277dde..159b6a4 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
- return r;
- }
-
r = radeon_ib_pool_start(rdev);
if (r)
return r;
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 04ddc36..b4f51c5 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -616,6 +616,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
ACK_FLUSH_CTL(3) |
SYNC_FLUSH_CTL));
+ if (rdev->family != CHIP_RV770)
+ WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
db_debug3 = RREG32(DB_DEBUG3);
db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
switch (rdev->family) {
@@ -792,7 +795,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
-
+ WREG32(VC_ENHANCE, 0);
}
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
@@ -956,6 +959,12 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
return 0;
}
@@ -978,12 +987,6 @@ int rv770_resume(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "radeon: audio init failed\n");
- return r;
- }
-
return r;
}
@@ -1092,12 +1095,6 @@ int rv770_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "radeon: audio init failed\n");
- return r;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index fdc0898..b0adfc5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -211,6 +211,7 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
+#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define CACHE_DEPTH(x) ((x) << 1)
@@ -310,6 +311,8 @@
#define TCP_CNTL 0x9610
#define TCP_CHAN_STEER 0x9614
+#define VC_ENHANCE 0x9714
+
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
#define VC_ONLY 0
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 549732e..0b02792 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends_per_asic,
- u32 *backend_disable_mask_per_asic,
- u32 num_shader_engines)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask = 0;
- u32 enabled_backends_count = 0;
- u32 num_backends_per_se;
- u32 cur_pipe;
- u32 swizzle_pipe[SI_MAX_PIPES];
- u32 cur_backend = 0;
- u32 i;
- bool force_no_swizzle;
-
- /* force legal values */
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_tile_pipes > rdev->config.si.max_tile_pipes)
- num_tile_pipes = rdev->config.si.max_tile_pipes;
- if (num_shader_engines < 1)
- num_shader_engines = 1;
- if (num_shader_engines > rdev->config.si.max_shader_engines)
- num_shader_engines = rdev->config.si.max_shader_engines;
- if (num_backends_per_asic < num_shader_engines)
- num_backends_per_asic = num_shader_engines;
- if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
- num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
-
- /* make sure we have the same number of backends per se */
- num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
- /* set up the number of backends per se */
- num_backends_per_se = num_backends_per_asic / num_shader_engines;
- if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
- num_backends_per_se = rdev->config.si.max_backends_per_se;
- num_backends_per_asic = num_backends_per_se * num_shader_engines;
- }
-
- /* create enable mask and count for enabled backends */
- for (i = 0; i < SI_MAX_BACKENDS; ++i) {
- if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends_per_asic)
- break;
- }
-
- /* force the backends mask to match the current number of backends */
- if (enabled_backends_count != num_backends_per_asic) {
- u32 this_backend_enabled;
- u32 shader_engine;
- u32 backend_per_se;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
- for (i = 0; i < SI_MAX_BACKENDS; ++i) {
- /* calc the current se */
- shader_engine = i / rdev->config.si.max_backends_per_se;
- /* calc the backend per se */
- backend_per_se = i % rdev->config.si.max_backends_per_se;
- /* default to not enabled */
- this_backend_enabled = 0;
- if ((shader_engine < num_shader_engines) &&
- (backend_per_se < num_backends_per_se))
- this_backend_enabled = 1;
- if (this_backend_enabled) {
- enabled_backends_mask |= (1 << i);
- *backend_disable_mask_per_asic &= ~(1 << i);
- ++enabled_backends_count;
- }
- }
- }
-
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
- switch (rdev->family) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- force_no_swizzle = true;
- break;
- default:
- force_no_swizzle = false;
- break;
- }
- if (force_no_swizzle) {
- bool last_backend_enabled = false;
-
- force_no_swizzle = false;
- for (i = 0; i < SI_MAX_BACKENDS; ++i) {
- if (((enabled_backends_mask >> i) & 1) == 1) {
- if (last_backend_enabled)
- force_no_swizzle = true;
- last_backend_enabled = true;
- } else
- last_backend_enabled = false;
- }
- }
-
- switch (num_tile_pipes) {
- case 1:
- case 3:
- case 5:
- case 7:
- DRM_ERROR("odd number of pipes!\n");
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- swizzle_pipe[3] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- }
- break;
- }
-
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-
- backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
- cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
-static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
- u32 disable_mask_per_se,
- u32 max_disable_mask_per_se,
- u32 num_shader_engines)
-{
- u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
- u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
- if (num_shader_engines == 1)
- return disable_mask_per_asic;
- else if (num_shader_engines == 2)
- return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
- else
- return 0xffffffff;
-}
-
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
}
+static void si_select_se_sh(struct radeon_device *rdev,
+ u32 se_num, u32 sh_num)
+{
+ u32 data = INSTANCE_BROADCAST_WRITES;
+
+ if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+ data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ else if (se_num == 0xffffffff)
+ data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+ else if (sh_num == 0xffffffff)
+ data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+ else
+ data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+ WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 si_create_bitmask(u32 bit_width)
+{
+ u32 i, mask = 0;
+
+ for (i = 0; i < bit_width; i++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+ return mask;
+}
+
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ if (data & 1)
+ data &= INACTIVE_CUS_MASK;
+ else
+ data = 0;
+ data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+ data >>= INACTIVE_CUS_SHIFT;
+
+ mask = si_create_bitmask(cu_per_sh);
+
+ return ~data & mask;
+}
+
+static void si_setup_spi(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+ u32 cu_per_sh)
+{
+ int i, j, k;
+ u32 data, mask, active_cu;
+
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ si_select_se_sh(rdev, i, j);
+ data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+ active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+ mask = 1;
+ for (k = 0; k < 16; k++) {
+ mask = 1 << k;
+ if (active_cu & mask) {
+ data &= ~mask;
+ WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+ break;
+ }
+ }
+ }
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+ u32 max_rb_num, u32 se_num,
+ u32 sh_per_se)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_RB_BACKEND_DISABLE);
+ if (data & 1)
+ data &= BACKEND_DISABLE_MASK;
+ else
+ data = 0;
+ data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+ data >>= BACKEND_DISABLE_SHIFT;
+
+ mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+ return data & mask;
+}
+
+static void si_setup_rb(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+ u32 max_rb_num)
+{
+ int i, j;
+ u32 data, mask;
+ u32 disabled_rbs = 0;
+ u32 enabled_rbs = 0;
+
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ si_select_se_sh(rdev, i, j);
+ data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+ }
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+ mask = 1;
+ for (i = 0; i < max_rb_num; i++) {
+ if (!(disabled_rbs & mask))
+ enabled_rbs |= mask;
+ mask <<= 1;
+ }
+
+ for (i = 0; i < se_num; i++) {
+ si_select_se_sh(rdev, i, 0xffffffff);
+ data = 0;
+ for (j = 0; j < sh_per_se; j++) {
+ switch (enabled_rbs & 3) {
+ case 1:
+ data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+ break;
+ case 2:
+ data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+ break;
+ case 3:
+ default:
+ data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+ break;
+ }
+ enabled_rbs >>= 2;
+ }
+ WREG32(PA_SC_RASTER_CONFIG, data);
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
static void si_gpu_init(struct radeon_device *rdev)
{
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_array_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 gb_backend_map;
- u32 cgts_tcc_disable;
u32 sx_debug_1;
- u32 gc_user_shader_array_config;
- u32 gc_user_rb_backend_disable;
- u32 cgts_user_tcc_disable;
u32 hdp_host_path_cntl;
u32 tmp;
int i, j;
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_TAHITI:
rdev->config.si.max_shader_engines = 2;
- rdev->config.si.max_pipes_per_simd = 4;
rdev->config.si.max_tile_pipes = 12;
- rdev->config.si.max_simds_per_se = 8;
+ rdev->config.si.max_cu_per_sh = 8;
+ rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 12;
rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_prim_fifo_size_backend = 0x100;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PITCAIRN:
rdev->config.si.max_shader_engines = 2;
- rdev->config.si.max_pipes_per_simd = 4;
rdev->config.si.max_tile_pipes = 8;
- rdev->config.si.max_simds_per_se = 5;
+ rdev->config.si.max_cu_per_sh = 5;
+ rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 8;
rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_prim_fifo_size_backend = 0x100;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_VERDE:
default:
rdev->config.si.max_shader_engines = 1;
- rdev->config.si.max_pipes_per_simd = 4;
rdev->config.si.max_tile_pipes = 4;
- rdev->config.si.max_simds_per_se = 2;
+ rdev->config.si.max_cu_per_sh = 2;
+ rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 4;
rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_prim_fifo_size_backend = 0x40;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev)
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
- cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
- cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
- cgts_tcc_disable = 0xffff0000;
- for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
- cgts_tcc_disable &= ~(1 << (16 + i));
- gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
- gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
- cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
- rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
- tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
- tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.si.backend_disable_mask_per_asic =
- si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
- rdev->config.si.num_shader_engines);
- rdev->config.si.backend_map =
- si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
- rdev->config.si.num_backends_per_se *
- rdev->config.si.num_shader_engines,
- &rdev->config.si.backend_disable_mask_per_asic,
- rdev->config.si.num_shader_engines);
- tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
- rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
rdev->config.si.mem_max_burst_length_bytes = 256;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.num_gpus = 1;
rdev->config.si.multi_gpu_tile_size = 64;
- gb_addr_config = 0;
- switch (rdev->config.si.num_tile_pipes) {
- case 1:
- gb_addr_config |= NUM_PIPES(0);
- break;
- case 2:
- gb_addr_config |= NUM_PIPES(1);
- break;
- case 4:
- gb_addr_config |= NUM_PIPES(2);
- break;
- case 8:
- default:
- gb_addr_config |= NUM_PIPES(3);
- break;
- }
-
- tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
- gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
- gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
- tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
- gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
- switch (rdev->config.si.num_gpus) {
- case 1:
- default:
- gb_addr_config |= NUM_GPUS(0);
- break;
- case 2:
- gb_addr_config |= NUM_GPUS(1);
- break;
- case 4:
- gb_addr_config |= NUM_GPUS(2);
- break;
- }
- switch (rdev->config.si.multi_gpu_tile_size) {
- case 16:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
- break;
- case 32:
- default:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
- break;
- case 64:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
- break;
- case 128:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
- break;
- }
+ /* fix up row size */
+ gb_addr_config &= ~ROW_SIZE_MASK;
switch (rdev->config.si.mem_row_size_in_kb) {
case 1:
default:
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev)
break;
}
- tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
- rdev->config.si.num_tile_pipes = (1 << tmp);
- tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
- rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
- tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
- rdev->config.si.num_shader_engines = tmp + 1;
- tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
- rdev->config.si.num_gpus = tmp + 1;
- tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
- rdev->config.si.multi_gpu_tile_size = 1 << tmp;
- tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
- rdev->config.si.mem_row_size_in_kb = 1 << tmp;
-
- gb_backend_map =
- si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
- rdev->config.si.num_backends_per_se *
- rdev->config.si.num_shader_engines,
- &rdev->config.si.backend_disable_mask_per_asic,
- rdev->config.si.num_shader_engines);
-
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.tile_config |= (3 << 0);
break;
}
- rdev->config.si.tile_config |=
- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ rdev->config.si.tile_config |= 1 << 4;
+ else
+ rdev->config.si.tile_config |= 0 << 4;
rdev->config.si.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.si.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
- rdev->config.si.backend_map = gb_backend_map;
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- /* primary versions */
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
-
- WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+ si_tiling_mode_table_init(rdev);
- /* user versions */
- WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
+ si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+ rdev->config.si.max_sh_per_se,
+ rdev->config.si.max_backends_per_se);
- WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+ si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+ rdev->config.si.max_sh_per_se,
+ rdev->config.si.max_cu_per_sh);
- si_tiling_mode_table_init(rdev);
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
@@ -2518,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);
/* empty context1-15 */
- /* FIXME start with 1G, once using 2 level pt switch to full
+ /* FIXME start with 4G, once using 2 level pt switch to full
* vm size space
*/
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
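The compute-unit and render-backend harvesting helpers added to si.c above reduce to simple mask arithmetic: si_create_bitmask(n) is just a mask of n set bits, and the active-CU mask is the complement of the fused-off bits truncated to that width. The same arithmetic as a standalone sketch, with a hypothetical fuse value rather than a real register read:

#include <stdint.h>
#include <stdio.h>

/* n consecutive set bits, i.e. (1 << n) - 1; mirrors si_create_bitmask() */
static uint32_t create_bitmask(uint32_t bit_width)
{
	return (bit_width >= 32) ? 0xffffffff : ((1u << bit_width) - 1);
}

int main(void)
{
	/* hypothetical fuse value: CUs 1 and 3 reported inactive */
	uint32_t inactive_cus = (1u << 1) | (1u << 3);
	uint32_t cu_per_sh = 8;	/* e.g. Tahiti */
	uint32_t active_cus = ~inactive_cus & create_bitmask(cu_per_sh);

	printf("active CU mask: 0x%02x\n", (unsigned)active_cus);	/* prints 0xf5 */
	return 0;
}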
diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h
index eda938a..501f9d43 100644
--- a/drivers/gpu/drm/radeon/si_reg.h
+++ b/drivers/gpu/drm/radeon/si_reg.h
@@ -30,4 +30,76 @@
#define SI_DC_GPIO_HPD_EN 0x65b8
#define SI_DC_GPIO_HPD_Y 0x65bc
+#define SI_GRPH_CONTROL 0x6804
+# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define SI_GRPH_DEPTH_8BPP 0
+# define SI_GRPH_DEPTH_16BPP 1
+# define SI_GRPH_DEPTH_32BPP 2
+# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define SI_ADDR_SURF_2_BANK 0
+# define SI_ADDR_SURF_4_BANK 1
+# define SI_ADDR_SURF_8_BANK 2
+# define SI_ADDR_SURF_16_BANK 3
+# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
+# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define SI_ADDR_SURF_BANK_WIDTH_1 0
+# define SI_ADDR_SURF_BANK_WIDTH_2 1
+# define SI_ADDR_SURF_BANK_WIDTH_4 2
+# define SI_ADDR_SURF_BANK_WIDTH_8 3
+# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define SI_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define SI_GRPH_FORMAT_ARGB1555 0
+# define SI_GRPH_FORMAT_ARGB565 1
+# define SI_GRPH_FORMAT_ARGB4444 2
+# define SI_GRPH_FORMAT_AI88 3
+# define SI_GRPH_FORMAT_MONO16 4
+# define SI_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define SI_GRPH_FORMAT_ARGB8888 0
+# define SI_GRPH_FORMAT_ARGB2101010 1
+# define SI_GRPH_FORMAT_32BPP_DIG 2
+# define SI_GRPH_FORMAT_8B_ARGB2101010 3
+# define SI_GRPH_FORMAT_BGRA1010102 4
+# define SI_GRPH_FORMAT_8B_BGRA1010102 5
+# define SI_GRPH_FORMAT_RGB111110 6
+# define SI_GRPH_FORMAT_BGR101111 7
+# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define SI_ADDR_SURF_BANK_HEIGHT_1 0
+# define SI_ADDR_SURF_BANK_HEIGHT_2 1
+# define SI_ADDR_SURF_BANK_HEIGHT_4 2
+# define SI_ADDR_SURF_BANK_HEIGHT_8 3
+# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define SI_ADDR_SURF_TILE_SPLIT_64B 0
+# define SI_ADDR_SURF_TILE_SPLIT_128B 1
+# define SI_ADDR_SURF_TILE_SPLIT_256B 2
+# define SI_ADDR_SURF_TILE_SPLIT_512B 3
+# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
+# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
+# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
+# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
+# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
+# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
+# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
+# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
+# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
+# define SI_ADDR_SURF_P2 0
+# define SI_ADDR_SURF_P4_8x16 4
+# define SI_ADDR_SURF_P4_16x16 5
+# define SI_ADDR_SURF_P4_16x32 6
+# define SI_ADDR_SURF_P4_32x32 7
+# define SI_ADDR_SURF_P8_16x16_8x16 8
+# define SI_ADDR_SURF_P8_16x32_8x16 9
+# define SI_ADDR_SURF_P8_32x32_8x16 10
+# define SI_ADDR_SURF_P8_16x32_16x16 11
+# define SI_ADDR_SURF_P8_32x32_16x16 12
+# define SI_ADDR_SURF_P8_32x32_16x32 13
+# define SI_ADDR_SURF_P8_32x64_32x32 14
+
#endif
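The SI_GRPH_CONTROL field macros added above are meant to be OR'd into a single surface-control word for the display controller. A rough illustration only, since the real values come from the buffer object's tiling parameters; the macro names are the ones defined in this header:

/* sketch: a 32bpp ARGB8888, 2D-tiled surface on an 8-pipe part */
u32 grph_control =
	SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
	SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
	SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_2D_TILED_THIN1) |
	SI_GRPH_NUM_BANKS(SI_ADDR_SURF_16_BANK) |
	SI_GRPH_BANK_WIDTH(SI_ADDR_SURF_BANK_WIDTH_1) |
	SI_GRPH_BANK_HEIGHT(SI_ADDR_SURF_BANK_HEIGHT_1) |
	SI_GRPH_TILE_SPLIT(SI_ADDR_SURF_TILE_SPLIT_1KB) |
	SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);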
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 53ea2c4..db40679 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -24,6 +24,11 @@
#ifndef SI_H
#define SI_H
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -408,6 +413,12 @@
#define SOFT_RESET_IA (1 << 15)
#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SH_INDEX(x) ((x) << 8)
+#define SE_INDEX(x) ((x) << 16)
+#define SH_BROADCAST_WRITES (1 << 29)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
#define GRBM_INT_CNTL 0x8060
# define RDERR_INT_ENABLE (1 << 0)
@@ -480,6 +491,8 @@
#define VGT_TF_MEMORY_BASE 0x89B8
#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
+#define INACTIVE_CUS_MASK 0xFFFF0000
+#define INACTIVE_CUS_SHIFT 16
#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
#define PA_CL_ENHANCE 0x8A14
@@ -688,6 +701,12 @@
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348
+#define PA_SC_RASTER_CONFIG 0x28350
+# define RASTER_CONFIG_RB_MAP_0 0
+# define RASTER_CONFIG_RB_MAP_1 1
+# define RASTER_CONFIG_RB_MAP_2 2
+# define RASTER_CONFIG_RB_MAP_3 3
+
#define VGT_EVENT_INITIATOR 0x28a90
# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 30d98d1..dd14cd1 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -47,9 +47,9 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv == NULL)
return -ENOMEM;
+ idr_init(&dev_priv->object_idr);
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- idr_init(&dev->object_name_idr);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index b67cfca..36f4b28 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
(*destroy)(bo);
else
kfree(bo);
+ ttm_mem_global_free(mem_glob, acc_size);
return -EINVAL;
}
bo->destroy = destroy;
@@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size;
int ret;
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (unlikely(ret != 0))
- return ret;
-
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-
- if (unlikely(bo == NULL)) {
- ttm_mem_global_free(mem_glob, acc_size);
+ if (unlikely(bo == NULL))
return -ENOMEM;
- }
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
buffer_start, interruptible,
persistent_swap_storage, acc_size, NULL, NULL);
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 4d02c46..6e52069 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -13,8 +13,21 @@
static struct drm_driver driver;
+/*
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
+ * We also require a match on SubClass (0x00) and Protocol (0x00),
+ * which is compatible with all known USB 2.0 era graphics chips and firmware,
+ * but allows DisplayLink to increment those for any future incompatible chips
+ */
static struct usb_device_id id_table[] = {
- {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
+ {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0x00,
+ .bInterfaceProtocol = 0x00,
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
{},
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index a8d5f09..4c2d836 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -61,7 +61,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
u8 length;
u16 key;
- key = *((u16 *) desc);
+ key = le16_to_cpu(*((u16 *) desc));
desc += sizeof(u16);
length = *desc;
desc++;
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 1f18225..c126182 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -100,12 +100,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv == NULL)
return -ENOMEM;
+ idr_init(&dev_priv->object_idr);
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- idr_init(&dev->object_name_idr);
-
pci_set_master(dev->pdev);
ret = drm_vblank_init(dev, 1);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 38f9534..5b3c7d1 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -190,6 +190,19 @@ find_active_client(struct list_head *head)
return NULL;
}
+int vga_switcheroo_get_client_state(struct pci_dev *pdev)
+{
+ struct vga_switcheroo_client *client;
+
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (!client)
+ return VGA_SWITCHEROO_NOT_FOUND;
+ if (!vgasr_priv.active)
+ return VGA_SWITCHEROO_INIT;
+ return client->pwr_state;
+}
+EXPORT_SYMBOL(vga_switcheroo_get_client_state);
+
void vga_switcheroo_unregister_client(struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;
@@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
vga_switchon(new_client);
vga_set_default_device(new_client->pdev);
- set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
-
return 0;
}
@@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
active->active = false;
+ set_audio_state(active->id, VGA_SWITCHEROO_OFF);
+
if (new_client->fb_info) {
struct fb_event event;
event.info = new_client->fb_info;
@@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (new_client->ops->reprobe)
new_client->ops->reprobe(new_client->pdev);
- set_audio_state(active->id, VGA_SWITCHEROO_OFF);
-
if (active->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(active);
+ set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
new_client->active = true;
return 0;
}
@@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr off the device not in use */
if (strncmp(usercmd, "OFF", 3) == 0) {
list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (client->active)
+ if (client->active || client_is_audio(client))
continue;
+ set_audio_state(client->id, VGA_SWITCHEROO_OFF);
if (client->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(client);
}
@@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr on the device not in use */
if (strncmp(usercmd, "ON", 2) == 0) {
list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (client->active)
+ if (client->active || client_is_audio(client))
continue;
if (client->pwr_state == VGA_SWITCHEROO_OFF)
vga_switchon(client);
+ set_audio_state(client->id, VGA_SWITCHEROO_ON);
}
goto out;
}
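vga_switcheroo_get_client_state(), exported above, lets a GPU driver ask at probe time whether the switcher has already powered its device down. A usage sketch; the helper and the policy around it are illustrative, not part of this patch:

static bool gpu_switched_off(struct pci_dev *pdev)
{
	int state = vga_switcheroo_get_client_state(pdev);

	/* VGA_SWITCHEROO_NOT_FOUND: not registered; VGA_SWITCHEROO_INIT:
	 * no switch has happened yet; treat both as "still powered". */
	return state == VGA_SWITCHEROO_OFF;
}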
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 034c80a..bef04c1 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1,20 +1,11 @@
#
# HID driver configuration
#
-menuconfig HID_SUPPORT
- bool "HID Devices"
- depends on INPUT
- default y
- ---help---
- Say Y here to get to see options for various computer-human interface
- device drivers. This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if HID_SUPPORT
+menu "HID support"
+ depends on INPUT
config HID
- tristate "Generic HID support"
+ tristate "HID bus support"
depends on INPUT
default y
---help---
@@ -23,14 +14,17 @@ config HID
most commonly used to refer to the USB-HID specification, but other
devices (such as, but not strictly limited to, Bluetooth) are
designed using HID specification (this involves certain keyboards,
- mice, tablets, etc). This option compiles into kernel the generic
- HID layer code (parser, usages, etc.), which can then be used by
- transport-specific HID implementation (like USB or Bluetooth).
+ mice, tablets, etc). This option adds the HID bus to the kernel,
+ together with generic HID layer code. The HID devices are added and
+ removed from the HID bus by the transport-layer drivers, such as
+ usbhid (USB_HID) and hidp (BT_HIDP).
For docs and specs, see http://www.usb.org/developers/hidpage/
If unsure, say Y.
+if HID
+
config HID_BATTERY_STRENGTH
bool "Battery level reporting for HID devices"
depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
@@ -59,23 +53,22 @@ config HIDRAW
If unsure, say Y.
-source "drivers/hid/usbhid/Kconfig"
-
-menu "Special HID drivers"
- depends on HID
-
config HID_GENERIC
tristate "Generic HID driver"
depends on HID
- default y
+ default HID
---help---
- Support for generic HID devices.
+ Support for generic devices on the HID bus. This includes most
+ keyboards and mice, joysticks, tablets and digitizers.
To compile this driver as a module, choose M here: the module
will be called hid-generic.
If unsure, say Y.
+menu "Special HID drivers"
+ depends on HID
+
config HID_A4TECH
tristate "A4 tech mice" if EXPERT
depends on USB_HID
@@ -662,4 +655,8 @@ config HID_ZYDACRON
endmenu
-endif # HID_SUPPORT
+endif # HID
+
+source "drivers/hid/usbhid/Kconfig"
+
+endmenu
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8e3a6b2..6ac0286 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1880,6 +1880,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9373f53..d1cdd2d 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -160,6 +160,9 @@
#define USB_VENDOR_ID_AVERMEDIA 0x07ca
#define USB_DEVICE_ID_AVER_FM_MR800 0xb800
+#define USB_VENDOR_ID_AXENTIA 0x12cf
+#define USB_DEVICE_ID_AXENTIA_FM_RADIO 0x7111
+
#define USB_VENDOR_ID_BAANTO 0x2453
#define USB_DEVICE_ID_BAANTO_MT_190W2 0x0100
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 5e8a7ed..0f9c146 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -436,27 +436,37 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
{
- struct dj_report dj_report;
+ struct dj_report *dj_report;
+ int retval;
- memset(&dj_report, 0, sizeof(dj_report));
- dj_report.report_id = REPORT_ID_DJ_SHORT;
- dj_report.device_index = 0xFF;
- dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+ dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+ if (!dj_report)
+ return -ENOMEM;
+ dj_report->report_id = REPORT_ID_DJ_SHORT;
+ dj_report->device_index = 0xFF;
+ dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+ kfree(dj_report);
+ return retval;
}
static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
unsigned timeout)
{
- struct dj_report dj_report;
+ struct dj_report *dj_report;
+ int retval;
- memset(&dj_report, 0, sizeof(dj_report));
- dj_report.report_id = REPORT_ID_DJ_SHORT;
- dj_report.device_index = 0xFF;
- dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
- dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
- dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+ dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+ if (!dj_report)
+ return -ENOMEM;
+ dj_report->report_id = REPORT_ID_DJ_SHORT;
+ dj_report->device_index = 0xFF;
+ dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+ dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+ dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+ kfree(dj_report);
+ return retval;
}
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 7cf3ffe..40ac665 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -426,8 +426,10 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
__set_bit(EV_ABS, input->evbit);
input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
+ 4, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
+ 4, 0);
input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
/* Note: Touch Y position from the device is inverted relative
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 0f20fd1..0108c59 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -1,13 +1,13 @@
-comment "USB Input Devices"
+menu "USB HID support"
depends on USB
config USB_HID
- tristate "USB Human Interface Device (full HID) support"
+ tristate "USB HID transport layer"
default y
depends on USB && INPUT
select HID
---help---
- Say Y here if you want full HID support to connect USB keyboards,
+ Say Y here if you want to connect USB keyboards,
mice, joysticks, graphic tablets, or any other HID based devices
to your computer via USB, as well as Uninterruptible Power Supply
(UPS) and monitor control devices.
@@ -81,4 +81,4 @@ config USB_MOUSE
endmenu
-
+endmenu
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index f082e48..2cde9ec 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -8,7 +8,7 @@
*
* Based on hdaps.c driver:
* Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
*
* Fan control based on smcFanControl:
* Copyright (C) 2006 Hendrik Holtmann <holtmann@mac.com>
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
int i;
if (send_command(cmd) || send_argument(key)) {
- pr_warn("%s: read arg fail\n", key);
+ pr_warn("%.4s: read arg fail\n", key);
return -EIO;
}
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
for (i = 0; i < len; i++) {
if (__wait_status(0x05)) {
- pr_warn("%s: read data fail\n", key);
+ pr_warn("%.4s: read data fail\n", key);
return -EIO;
}
buffer[i] = inb(APPLESMC_DATA_PORT);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index b9d5123..637c51c 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -191,6 +191,24 @@ static ssize_t show_temp(struct device *dev,
return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}
+struct tjmax {
+ char const *id;
+ int tjmax;
+};
+
+static struct tjmax __cpuinitconst tjmax_table[] = {
+ { "CPU D410", 100000 },
+ { "CPU D425", 100000 },
+ { "CPU D510", 100000 },
+ { "CPU D525", 100000 },
+ { "CPU N450", 100000 },
+ { "CPU N455", 100000 },
+ { "CPU N470", 100000 },
+ { "CPU N475", 100000 },
+ { "CPU 230", 100000 },
+ { "CPU 330", 125000 },
+};
+
static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
struct device *dev)
{
@@ -202,6 +220,13 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
int err;
u32 eax, edx;
struct pci_dev *host_bridge;
+ int i;
+
+ /* explicit tjmax table entries override heuristics */
+ for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
+ if (strstr(c->x86_model_id, tjmax_table[i].id))
+ return tjmax_table[i].tjmax;
+ }
/* Early chips have no MSR for TjMax */
@@ -210,7 +235,8 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
/* Atom CPUs */
- if (c->x86_model == 0x1c) {
+ if (c->x86_model == 0x1c || c->x86_model == 0x26
+ || c->x86_model == 0x27) {
usemsr_ee = 0;
host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
@@ -223,6 +249,9 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
tjmax = 90000;
pci_dev_put(host_bridge);
+ } else if (c->x86_model == 0x36) {
+ usemsr_ee = 0;
+ tjmax = 100000;
}
if (c->x86_model > 0xe && usemsr_ee) {
@@ -664,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
* sensors. We check this bit only, all the early CPUs
* without thermal sensors will be filtered out.
*/
- if (!cpu_has(c, X86_FEATURE_DTS))
+ if (!cpu_has(c, X86_FEATURE_DTHERM))
return;
if (!pdev) {
@@ -765,14 +794,14 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
};
static const struct x86_cpu_id coretemp_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
static int __init coretemp_init(void)
{
- int i, err = -ENODEV;
+ int i, err;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
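The TjMax table added to coretemp above is a plain substring match over the CPU model string, consulted before the heuristics run. The same lookup as a standalone sketch, with a hypothetical model name and a shortened copy of the table:

#include <stdio.h>
#include <string.h>

struct tjmax { const char *id; int tjmax; };

static const struct tjmax tjmax_table[] = {
	{ "CPU N450", 100000 },
	{ "CPU 330",  125000 },
};

static int lookup_tjmax(const char *model_id, int fallback)
{
	size_t i;

	for (i = 0; i < sizeof(tjmax_table) / sizeof(tjmax_table[0]); i++)
		if (strstr(model_id, tjmax_table[i].id))
			return tjmax_table[i].tjmax;
	return fallback;	/* fall through to the heuristics */
}

int main(void)
{
	/* hypothetical model name as read from cpuinfo */
	printf("%d\n", lookup_tjmax("Intel(R) Atom(TM) CPU N450   @ 1.66GHz", 90000));
	return 0;
}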
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 9691f66..e7d234b 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -451,11 +451,15 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
data->fan_rpm_control = true;
break;
default:
- mutex_unlock(&data->update_lock);
- return -EINVAL;
+ count = -EINVAL;
+ goto err;
}
- read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+ result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+ if (result) {
+ count = result;
+ goto err;
+ }
if (data->fan_rpm_control)
conf_reg |= 0x80;
@@ -463,7 +467,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
conf_reg &= ~0x80;
i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
-
+err:
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index a9bfd67..e72ba5d 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -590,6 +590,6 @@ abort:
module_i2c_driver(jc42_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("JC42 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index d264937..bd75d24 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -567,6 +567,6 @@ static struct i2c_driver pem_driver = {
module_i2c_driver(pem_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 069b7d34..77476a5 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -292,6 +292,6 @@ static struct i2c_driver ltc4261_driver = {
module_i2c_driver(ltc4261_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("LTC4261 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 822261b..019427d 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -692,6 +692,6 @@ static struct i2c_driver max16065_driver = {
module_i2c_driver(max16065_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("MAX16065 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index beb2491..a0edd98 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -37,4 +37,16 @@ config I2C_MUX_PCA954x
This driver can also be built as a module. If so, the module
will be called i2c-mux-pca954x.
+config I2C_MUX_PINCTRL
+ tristate "pinctrl-based I2C multiplexer"
+ depends on PINCTRL
+ help
+ If you say yes to this option, support will be included for an I2C
+ multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
+ This is useful for SoCs whose I2C module's signals can be routed to
+ different sets of pins at run-time.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-mux-pinctrl.
+
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 5826249..76da869 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
+obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
new file mode 100644
index 0000000..46a6697
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -0,0 +1,279 @@
+/*
+ * I2C multiplexer using pinctrl API
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/i2c-mux-pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct i2c_mux_pinctrl {
+ struct device *dev;
+ struct i2c_mux_pinctrl_platform_data *pdata;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state **states;
+ struct pinctrl_state *state_idle;
+ struct i2c_adapter *parent;
+ struct i2c_adapter **busses;
+};
+
+static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
+ u32 chan)
+{
+ struct i2c_mux_pinctrl *mux = data;
+
+ return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
+}
+
+static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
+ u32 chan)
+{
+ struct i2c_mux_pinctrl *mux = data;
+
+ return pinctrl_select_state(mux->pinctrl, mux->state_idle);
+}
+
+#ifdef CONFIG_OF
+static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int num_names, i, ret;
+ struct device_node *adapter_np;
+ struct i2c_adapter *adapter;
+
+ if (!np)
+ return 0;
+
+ mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
+ if (!mux->pdata) {
+ dev_err(mux->dev,
+ "Cannot allocate i2c_mux_pinctrl_platform_data\n");
+ return -ENOMEM;
+ }
+
+ num_names = of_property_count_strings(np, "pinctrl-names");
+ if (num_names < 0) {
+ dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+ num_names);
+ return num_names;
+ }
+
+ mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->pdata->pinctrl_states) * num_names,
+ GFP_KERNEL);
+ if (!mux->pdata->pinctrl_states) {
+ dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_names; i++) {
+ ret = of_property_read_string_index(np, "pinctrl-names", i,
+ &mux->pdata->pinctrl_states[mux->pdata->bus_count]);
+ if (ret < 0) {
+ dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+ ret);
+ return ret;
+ }
+ if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
+ "idle")) {
+ if (i != num_names - 1) {
+ dev_err(mux->dev, "idle state must be last\n");
+ return -EINVAL;
+ }
+ mux->pdata->pinctrl_state_idle = "idle";
+ } else {
+ mux->pdata->bus_count++;
+ }
+ }
+
+ adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!adapter_np) {
+ dev_err(mux->dev, "Cannot parse i2c-parent\n");
+ return -ENODEV;
+ }
+ adapter = of_find_i2c_adapter_by_node(adapter_np);
+ if (!adapter) {
+ dev_err(mux->dev, "Cannot find parent bus\n");
+ return -ENODEV;
+ }
+ mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
+ put_device(&adapter->dev);
+
+ return 0;
+}
+#else
+static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+{
+ struct i2c_mux_pinctrl *mux;
+ int (*deselect)(struct i2c_adapter *, void *, u32);
+ int i, ret;
+
+ mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ platform_set_drvdata(pdev, mux);
+
+ mux->dev = &pdev->dev;
+
+ mux->pdata = pdev->dev.platform_data;
+ if (!mux->pdata) {
+ ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
+ if (ret < 0)
+ goto err;
+ }
+ if (!mux->pdata) {
+ dev_err(&pdev->dev, "Missing platform data\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ mux->states = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->states) * mux->pdata->bus_count,
+ GFP_KERNEL);
+ if (!mux->states) {
+ dev_err(&pdev->dev, "Cannot allocate states\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mux->busses = devm_kzalloc(&pdev->dev,
+ sizeof(mux->busses) * mux->pdata->bus_count,
+ GFP_KERNEL);
+ if (!mux->busses) {
+ dev_err(&pdev->dev, "Cannot allocate busses\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mux->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(mux->pinctrl)) {
+ ret = PTR_ERR(mux->pinctrl);
+ dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret);
+ goto err;
+ }
+ for (i = 0; i < mux->pdata->bus_count; i++) {
+ mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
+ mux->pdata->pinctrl_states[i]);
+ if (IS_ERR(mux->states[i])) {
+ ret = PTR_ERR(mux->states[i]);
+ dev_err(&pdev->dev,
+ "Cannot look up pinctrl state %s: %d\n",
+ mux->pdata->pinctrl_states[i], ret);
+ goto err;
+ }
+ }
+ if (mux->pdata->pinctrl_state_idle) {
+ mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
+ mux->pdata->pinctrl_state_idle);
+ if (IS_ERR(mux->state_idle)) {
+ ret = PTR_ERR(mux->state_idle);
+ dev_err(&pdev->dev,
+ "Cannot look up pinctrl state %s: %d\n",
+ mux->pdata->pinctrl_state_idle, ret);
+ goto err;
+ }
+
+ deselect = i2c_mux_pinctrl_deselect;
+ } else {
+ deselect = NULL;
+ }
+
+ mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
+ if (!mux->parent) {
+ dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+ mux->pdata->parent_bus_num);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ for (i = 0; i < mux->pdata->bus_count; i++) {
+ u32 bus = mux->pdata->base_bus_num ?
+ (mux->pdata->base_bus_num + i) : 0;
+
+ mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
+ mux, bus, i,
+ i2c_mux_pinctrl_select,
+ deselect);
+ if (!mux->busses[i]) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+ goto err_del_adapter;
+ }
+ }
+
+ return 0;
+
+err_del_adapter:
+ for (; i > 0; i--)
+ i2c_del_mux_adapter(mux->busses[i - 1]);
+ i2c_put_adapter(mux->parent);
+err:
+ return ret;
+}
+
+static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+{
+ struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < mux->pdata->bus_count; i++)
+ i2c_del_mux_adapter(mux->busses[i]);
+
+ i2c_put_adapter(mux->parent);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+ { .compatible = "i2c-mux-pinctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
+#endif
+
+static struct platform_driver i2c_mux_pinctrl_driver = {
+ .driver = {
+ .name = "i2c-mux-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
+ },
+ .probe = i2c_mux_pinctrl_probe,
+ .remove = __devexit_p(i2c_mux_pinctrl_remove),
+};
+module_platform_driver(i2c_mux_pinctrl_driver);
+
+MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-mux-pinctrl");
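For boards without device tree, the new driver expects platform data naming the parent adapter and one pinctrl state per child bus. A board-file sketch; the field names follow how the probe path above dereferences them, and the exact definition lives in the new <linux/i2c-mux-pinctrl.h> header, so treat the layout as an assumption:

#include <linux/i2c-mux-pinctrl.h>
#include <linux/platform_device.h>

/* assumed layout, matching the fields dereferenced in probe() above */
static const char *board_i2c_mux_states[] = { "i2c-a", "i2c-b" };

static struct i2c_mux_pinctrl_platform_data board_i2c_mux_pdata = {
	.parent_bus_num		= 0,		/* adapter the mux sits on */
	.base_bus_num		= 0,		/* 0 = dynamic child bus numbers */
	.bus_count		= 2,
	.pinctrl_states		= board_i2c_mux_states,
	.pinctrl_state_idle	= "idle",	/* optional */
};

static struct platform_device board_i2c_mux_dev = {
	.name			= "i2c-mux-pinctrl",
	.id			= -1,
	.dev.platform_data	= &board_i2c_mux_pdata,
};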
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 8716066..bcb507b 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = {
*/
static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- unsigned long cycle_time;
+ unsigned long cycle_time = 0;
int use_dma_info = 0;
const u8 xfer_mode = drive->dma_mode;
@@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
ide_set_drivedata(drive, (void *)cycle_time);
- printk("%s: %s selected (peak %dMB/s)\n", drive->name,
- ide_xfer_verbose(xfer_mode),
- 2000 / (unsigned long)ide_get_drivedata(drive));
+ printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
+ drive->name, ide_xfer_verbose(xfer_mode),
+ 2000 / (cycle_time ? cycle_time : (unsigned long) -1));
}
static const struct ide_port_ops icside_v6_port_ops = {
@@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = {
.dma_test_irq = icside_dma_test_irq,
.dma_lost_irq = ide_dma_lost_irq,
};
-#else
-#define icside_v6_dma_ops NULL
#endif
static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
@@ -456,7 +454,6 @@ err_free:
static const struct ide_port_info icside_v6_port_info __initdata = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
- .dma_ops = &icside_v6_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
.mwdma_mask = ATA_MWDMA2,
.swdma_mask = ATA_SWDMA2,
@@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
ecard_set_drvdata(ec, state);
+#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
d.init_dma = icside_dma_init;
d.port_ops = &icside_v6_port_ops;
- } else
- d.dma_ops = NULL;
+ d.dma_ops = &icside_v6_dma_ops;
+ }
+#endif
ret = ide_host_register(host, &d, hws);
if (ret)
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 28e344e..f1e922e 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
{
int *is_kme = priv_data;
- if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) {
+ if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
+ != IO_DATA_PATH_WIDTH_8) {
pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
}
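The ide-cs hunk is a multi-bit-field test fix: assuming the usual cistpl.h encoding (IO_DATA_PATH_WIDTH 0x18, _8 0x08, _AUTO 0x18), the 8-bit flag bit is also set in the AUTO value, so testing the single bit misclassifies AUTO as 8-bit; the whole field has to be masked and then compared. A reduced illustration with those assumed values:

#include <stdio.h>

#define IO_DATA_PATH_WIDTH	0x18	/* assumed values, per <pcmcia/cistpl.h> */
#define IO_DATA_PATH_WIDTH_8	0x08
#define IO_DATA_PATH_WIDTH_AUTO	0x18

int main(void)
{
	unsigned long flags = IO_DATA_PATH_WIDTH_AUTO;

	/* old test: AUTO also has the 0x08 bit set, so it looks "8-bit" */
	printf("bit test says 8-bit:   %d\n", !!(flags & IO_DATA_PATH_WIDTH_8));
	/* fixed test: mask the whole field, then compare */
	printf("field test says 8-bit: %d\n",
	       (flags & IO_DATA_PATH_WIDTH) == IO_DATA_PATH_WIDTH_8);
	return 0;
}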
diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig
index 15c0640..1fc4eef 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/ieee802154/Kconfig
@@ -19,6 +19,7 @@ config IEEE802154_FAKEHARD
This driver can also be built as a module. To do so say M here.
The module will be called 'fakehard'.
+
config IEEE802154_FAKELB
depends on IEEE802154_DRIVERS && MAC802154
tristate "IEEE 802.15.4 loopback driver"
@@ -28,3 +29,8 @@ config IEEE802154_FAKELB
This driver can also be built as a module. To do so say M here.
The module will be called 'fakelb'.
+
+config IEEE802154_AT86RF230
+ depends on IEEE802154_DRIVERS && MAC802154
+ tristate "AT86RF230/231 transceiver driver"
+ depends on SPI
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
index ea784ea..4f4371d 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/ieee802154/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
+obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
diff --git a/drivers/ieee802154/at86rf230.c b/drivers/ieee802154/at86rf230.c
new file mode 100644
index 0000000..5d30940
--- /dev/null
+++ b/drivers/ieee802154/at86rf230.c
@@ -0,0 +1,968 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/at86rf230.h>
+#include <linux/skbuff.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+struct at86rf230_local {
+ struct spi_device *spi;
+ int rstn, slp_tr, dig2;
+
+ u8 part;
+ u8 vers;
+
+ u8 buf[2];
+ struct mutex bmux;
+
+ struct work_struct irqwork;
+ struct completion tx_complete;
+
+ struct ieee802154_dev *dev;
+
+ spinlock_t lock;
+ bool irq_disabled;
+ bool is_tx;
+};
+
+#define RG_TRX_STATUS (0x01)
+#define SR_TRX_STATUS 0x01, 0x1f, 0
+#define SR_RESERVED_01_3 0x01, 0x20, 5
+#define SR_CCA_STATUS 0x01, 0x40, 6
+#define SR_CCA_DONE 0x01, 0x80, 7
+#define RG_TRX_STATE (0x02)
+#define SR_TRX_CMD 0x02, 0x1f, 0
+#define SR_TRAC_STATUS 0x02, 0xe0, 5
+#define RG_TRX_CTRL_0 (0x03)
+#define SR_CLKM_CTRL 0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
+#define SR_PAD_IO_CLKM 0x03, 0x30, 4
+#define SR_PAD_IO 0x03, 0xc0, 6
+#define RG_TRX_CTRL_1 (0x04)
+#define SR_IRQ_POLARITY 0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
+#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
+#define SR_RX_BL_CTRL 0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
+#define SR_PA_EXT_EN 0x04, 0x80, 7
+#define RG_PHY_TX_PWR (0x05)
+#define SR_TX_PWR 0x05, 0x0f, 0
+#define SR_PA_LT 0x05, 0x30, 4
+#define SR_PA_BUF_LT 0x05, 0xc0, 6
+#define RG_PHY_RSSI (0x06)
+#define SR_RSSI 0x06, 0x1f, 0
+#define SR_RND_VALUE 0x06, 0x60, 5
+#define SR_RX_CRC_VALID 0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL (0x07)
+#define SR_ED_LEVEL 0x07, 0xff, 0
+#define RG_PHY_CC_CCA (0x08)
+#define SR_CHANNEL 0x08, 0x1f, 0
+#define SR_CCA_MODE 0x08, 0x60, 5
+#define SR_CCA_REQUEST 0x08, 0x80, 7
+#define RG_CCA_THRES (0x09)
+#define SR_CCA_ED_THRES 0x09, 0x0f, 0
+#define SR_RESERVED_09_1 0x09, 0xf0, 4
+#define RG_RX_CTRL (0x0a)
+#define SR_PDT_THRES 0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
+#define RG_SFD_VALUE (0x0b)
+#define SR_SFD_VALUE 0x0b, 0xff, 0
+#define RG_TRX_CTRL_2 (0x0c)
+#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
+#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
+#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
+#define RG_ANT_DIV (0x0d)
+#define SR_ANT_CTRL 0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
+#define SR_ANT_DIV_EN 0x0d, 0x08, 3
+#define SR_RESERVED_0d_2 0x0d, 0x70, 4
+#define SR_ANT_SEL 0x0d, 0x80, 7
+#define RG_IRQ_MASK (0x0e)
+#define SR_IRQ_MASK 0x0e, 0xff, 0
+#define RG_IRQ_STATUS (0x0f)
+#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
+#define SR_IRQ_5_AMI 0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
+#define RG_VREG_CTRL (0x10)
+#define SR_RESERVED_10_6 0x10, 0x03, 0
+#define SR_DVDD_OK 0x10, 0x04, 2
+#define SR_DVREG_EXT 0x10, 0x08, 3
+#define SR_RESERVED_10_3 0x10, 0x30, 4
+#define SR_AVDD_OK 0x10, 0x40, 6
+#define SR_AVREG_EXT 0x10, 0x80, 7
+#define RG_BATMON (0x11)
+#define SR_BATMON_VTH 0x11, 0x0f, 0
+#define SR_BATMON_HR 0x11, 0x10, 4
+#define SR_BATMON_OK 0x11, 0x20, 5
+#define SR_RESERVED_11_1 0x11, 0xc0, 6
+#define RG_XOSC_CTRL (0x12)
+#define SR_XTAL_TRIM 0x12, 0x0f, 0
+#define SR_XTAL_MODE 0x12, 0xf0, 4
+#define RG_RX_SYN (0x15)
+#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
+#define SR_RESERVED_15_2 0x15, 0x70, 4
+#define SR_RX_PDT_DIS 0x15, 0x80, 7
+#define RG_XAH_CTRL_1 (0x17)
+#define SR_RESERVED_17_8 0x17, 0x01, 0
+#define SR_AACK_PROM_MODE 0x17, 0x02, 1
+#define SR_AACK_ACK_TIME 0x17, 0x04, 2
+#define SR_RESERVED_17_5 0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
+#define SR_RESERVED_17_2 0x17, 0x40, 6
+#define SR_RESERVED_17_1 0x17, 0x80, 7
+#define RG_FTN_CTRL (0x18)
+#define SR_RESERVED_18_2 0x18, 0x7f, 0
+#define SR_FTN_START 0x18, 0x80, 7
+#define RG_PLL_CF (0x1a)
+#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
+#define SR_PLL_CF_START 0x1a, 0x80, 7
+#define RG_PLL_DCU (0x1b)
+#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2 0x1b, 0x40, 6
+#define SR_PLL_DCU_START 0x1b, 0x80, 7
+#define RG_PART_NUM (0x1c)
+#define SR_PART_NUM 0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM 0x1d, 0xff, 0
+#define RG_MAN_ID_0 (0x1e)
+#define SR_MAN_ID_0 0x1e, 0xff, 0
+#define RG_MAN_ID_1 (0x1f)
+#define SR_MAN_ID_1 0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0 (0x20)
+#define SR_SHORT_ADDR_0 0x20, 0xff, 0
+#define RG_SHORT_ADDR_1 (0x21)
+#define SR_SHORT_ADDR_1 0x21, 0xff, 0
+#define RG_PAN_ID_0 (0x22)
+#define SR_PAN_ID_0 0x22, 0xff, 0
+#define RG_PAN_ID_1 (0x23)
+#define SR_PAN_ID_1 0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0 0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1 0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2 0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3 0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4 0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5 0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
+#define RG_XAH_CTRL_0 (0x2c)
+#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0 0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1 0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
+#define SR_AACK_SET_PD 0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
+#define RG_CSMA_BE (0x2f)
+#define SR_MIN_BE 0x2f, 0x0f, 0
+#define SR_MAX_BE 0x2f, 0xf0, 4
+
+#define CMD_REG 0x80
+#define CMD_REG_MASK 0x3f
+#define CMD_WRITE 0x40
+#define CMD_FB 0x20
+
+#define IRQ_BAT_LOW (1 << 7)
+#define IRQ_TRX_UR (1 << 6)
+#define IRQ_AMI (1 << 5)
+#define IRQ_CCA_ED (1 << 4)
+#define IRQ_TRX_END (1 << 3)
+#define IRQ_RX_START (1 << 2)
+#define IRQ_PLL_UNL (1 << 1)
+#define IRQ_PLL_LOCK (1 << 0)
+
+#define STATE_P_ON 0x00 /* BUSY */
+#define STATE_BUSY_RX 0x01
+#define STATE_BUSY_TX 0x02
+#define STATE_FORCE_TRX_OFF 0x03
+#define STATE_FORCE_TX_ON 0x04 /* IDLE */
+/* 0x05 */ /* INVALID_PARAMETER */
+#define STATE_RX_ON 0x06
+/* 0x07 */ /* SUCCESS */
+#define STATE_TRX_OFF 0x08
+#define STATE_TX_ON 0x09
+/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP 0x0F
+#define STATE_BUSY_RX_AACK 0x11
+#define STATE_BUSY_TX_ARET 0x12
+#define STATE_BUSY_RX_AACK_ON 0x16
+#define STATE_BUSY_TX_ARET_ON 0x19
+#define STATE_RX_ON_NOCLK 0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+static int
+__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ };
+
+ buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+ buf[1] = data;
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ return status;
+}
+
+static int
+__at86rf230_read_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 *data)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+
+ buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
+ buf[1] = 0xff;
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status == 0)
+ *data = buf[1];
+
+ return status;
+}
+
+static int
+at86rf230_read_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 *data)
+{
+ int status;
+
+ mutex_lock(&lp->bmux);
+ status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_write_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 data)
+{
+ int status;
+ u8 val;
+
+ mutex_lock(&lp->bmux);
+ status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
+ if (status)
+ goto out;
+
+ val &= ~mask;
+ val |= (data << shift) & mask;
+
+ status = __at86rf230_write(lp, addr, val);
+out:
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer_head = {
+ .len = 2,
+ .tx_buf = buf,
+
+ };
+ struct spi_transfer xfer_buf = {
+ .len = len,
+ .tx_buf = data,
+ };
+
+ mutex_lock(&lp->bmux);
+ buf[0] = CMD_WRITE | CMD_FB;
+ buf[1] = len + 2; /* 2 bytes for CRC that isn't written */
+
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head, &msg);
+ spi_message_add_tail(&xfer_buf, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ mutex_unlock(&lp->bmux);
+ return status;
+}
+
+static int
+at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer_head = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ struct spi_transfer xfer_head1 = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ struct spi_transfer xfer_buf = {
+ .len = 0,
+ .rx_buf = data,
+ };
+
+ mutex_lock(&lp->bmux);
+
+ buf[0] = CMD_FB;
+ buf[1] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+
+ xfer_buf.len = *(buf + 1) + 1;
+ *len = buf[1];
+
+ buf[0] = CMD_FB;
+ buf[1] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head1, &msg);
+ spi_message_add_tail(&xfer_buf, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status) {
+ if (lqi && (*len > lp->buf[1]))
+ *lqi = data[lp->buf[1]];
+ }
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+{
+ might_sleep();
+ BUG_ON(!level);
+ *level = 0xbe;
+ return 0;
+}
+
+static int
+at86rf230_state(struct ieee802154_dev *dev, int state)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+ u8 val;
+ u8 desired_status;
+
+ might_sleep();
+
+ if (state == STATE_FORCE_TX_ON)
+ desired_status = STATE_TX_ON;
+ else if (state == STATE_FORCE_TRX_OFF)
+ desired_status = STATE_TRX_OFF;
+ else
+ desired_status = state;
+
+ do {
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+ if (rc)
+ goto err;
+ } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+ if (val == desired_status)
+ return 0;
+
+ /* state is equal to phy states */
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
+ if (rc)
+ goto err;
+
+ do {
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+ if (rc)
+ goto err;
+ } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+
+ if (val == desired_status)
+ return 0;
+
+ pr_err("unexpected state change: %d, asked for %d\n", val, state);
+ return -EBUSY;
+
+err:
+ pr_err("error: %d\n", rc);
+ return rc;
+}
+
+static int
+at86rf230_start(struct ieee802154_dev *dev)
+{
+ struct at86rf230_local *lp = dev->priv;
+ u8 rc;
+
+ rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
+ if (rc)
+ return rc;
+
+ return at86rf230_state(dev, STATE_RX_ON);
+}
+
+static void
+at86rf230_stop(struct ieee802154_dev *dev)
+{
+ at86rf230_state(dev, STATE_FORCE_TRX_OFF);
+}
+
+static int
+at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ might_sleep();
+
+ if (page != 0 || channel < 11 || channel > 26) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+ msleep(1); /* Wait for PLL */
+ dev->phy->current_channel = channel;
+
+ return 0;
+}
+
+static int
+at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+ unsigned long flags;
+
+ spin_lock(&lp->lock);
+ if (lp->irq_disabled) {
+ spin_unlock(&lp->lock);
+ return -EBUSY;
+ }
+ spin_unlock(&lp->lock);
+
+ might_sleep();
+
+ rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
+ if (rc)
+ goto err;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->is_tx = 1;
+ INIT_COMPLETION(lp->tx_complete);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
+ if (rc)
+ goto err_rx;
+
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
+ if (rc)
+ goto err_rx;
+
+ rc = wait_for_completion_interruptible(&lp->tx_complete);
+ if (rc < 0)
+ goto err_rx;
+
+ rc = at86rf230_start(dev);
+
+ return rc;
+
+err_rx:
+ at86rf230_start(dev);
+err:
+ pr_err("error: %d\n", rc);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->is_tx = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return rc;
+}
+
+static int at86rf230_rx(struct at86rf230_local *lp)
+{
+ u8 len = 128, lqi = 0;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+
+ if (!skb)
+ return -ENOMEM;
+
+ if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi))
+ goto err;
+
+ if (len < 2)
+ goto err;
+
+ skb_trim(skb, len - 2); /* We do not put CRC into the frame */
+
+ ieee802154_rx_irqsafe(lp->dev, skb, lqi);
+
+ dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);
+
+ return 0;
+err:
+ pr_debug("received frame is too small\n");
+
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static struct ieee802154_ops at86rf230_ops = {
+ .owner = THIS_MODULE,
+ .xmit = at86rf230_xmit,
+ .ed = at86rf230_ed,
+ .set_channel = at86rf230_channel,
+ .start = at86rf230_start,
+ .stop = at86rf230_stop,
+};
+
+static void at86rf230_irqwork(struct work_struct *work)
+{
+ struct at86rf230_local *lp =
+ container_of(work, struct at86rf230_local, irqwork);
+ u8 status = 0, val;
+ int rc;
+ unsigned long flags;
+
+ rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
+ status |= val;
+
+ status &= ~IRQ_PLL_LOCK; /* ignore */
+ status &= ~IRQ_RX_START; /* ignore */
+ status &= ~IRQ_AMI; /* ignore */
+ status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
+
+ if (status & IRQ_TRX_END) {
+ spin_lock_irqsave(&lp->lock, flags);
+ status &= ~IRQ_TRX_END;
+ if (lp->is_tx) {
+ lp->is_tx = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+ complete(&lp->tx_complete);
+ } else {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ at86rf230_rx(lp);
+ }
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->irq_disabled = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ enable_irq(lp->spi->irq);
+}
+
+static irqreturn_t at86rf230_isr(int irq, void *data)
+{
+ struct at86rf230_local *lp = data;
+
+ disable_irq_nosync(irq);
+
+ spin_lock(&lp->lock);
+ lp->irq_disabled = 1;
+ spin_unlock(&lp->lock);
+
+ schedule_work(&lp->irqwork);
+
+ return IRQ_HANDLED;
+}
+
+
+static int at86rf230_hw_init(struct at86rf230_local *lp)
+{
+ u8 status;
+ int rc;
+
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+ if (status == STATE_P_ON) {
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
+ if (rc)
+ return rc;
+ msleep(1);
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+ }
+
+ rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR |
+ * IRQ_CCA_ED |
+ * IRQ_TRX_END |
+ * IRQ_PLL_UNL |
+ * IRQ_PLL_LOCK
+ */
+ if (rc)
+ return rc;
+
+ /* CLKM changes are applied immediately */
+ rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
+ if (rc)
+ return rc;
+
+ /* Turn CLKM Off */
+ rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00);
+ if (rc)
+ return rc;
+ /* Wait the next SLEEP cycle */
+ msleep(100);
+
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
+ if (rc)
+ return rc;
+ msleep(1);
+
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+
+ rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+ if (rc)
+ return rc;
+ if (!status) {
+ dev_err(&lp->spi->dev, "DVDD error\n");
+ return -EINVAL;
+ }
+
+ rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
+ if (rc)
+ return rc;
+ if (!status) {
+ dev_err(&lp->spi->dev, "AVDD error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
+{
+ return 0;
+}
+
+static int at86rf230_resume(struct spi_device *spi)
+{
+ return 0;
+}
+
+static int at86rf230_fill_data(struct spi_device *spi)
+{
+ struct at86rf230_local *lp = spi_get_drvdata(spi);
+ struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&spi->dev, "no platform_data\n");
+ return -EINVAL;
+ }
+
+ lp->rstn = pdata->rstn;
+ lp->slp_tr = pdata->slp_tr;
+ lp->dig2 = pdata->dig2;
+
+ return 0;
+}
+
+static int __devinit at86rf230_probe(struct spi_device *spi)
+{
+ struct ieee802154_dev *dev;
+ struct at86rf230_local *lp;
+ u8 man_id_0, man_id_1;
+ int rc;
+ const char *chip;
+ int supported = 0;
+
+ if (!spi->irq) {
+ dev_err(&spi->dev, "no IRQ specified\n");
+ return -EINVAL;
+ }
+
+ dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
+ if (!dev)
+ return -ENOMEM;
+
+ lp = dev->priv;
+ lp->dev = dev;
+
+ lp->spi = spi;
+
+ dev->priv = lp;
+ dev->parent = &spi->dev;
+ dev->extra_tx_headroom = 0;
+	/* We support only 2.4 GHz */
+ dev->phy->channels_supported[0] = 0x7FFF800;
+ dev->flags = IEEE802154_HW_OMIT_CKSUM;
+
+ mutex_init(&lp->bmux);
+ INIT_WORK(&lp->irqwork, at86rf230_irqwork);
+ spin_lock_init(&lp->lock);
+ init_completion(&lp->tx_complete);
+
+ spi_set_drvdata(spi, lp);
+
+ rc = at86rf230_fill_data(spi);
+ if (rc)
+ goto err_fill;
+
+ rc = gpio_request(lp->rstn, "rstn");
+ if (rc)
+ goto err_rstn;
+
+ if (gpio_is_valid(lp->slp_tr)) {
+ rc = gpio_request(lp->slp_tr, "slp_tr");
+ if (rc)
+ goto err_slp_tr;
+ }
+
+ rc = gpio_direction_output(lp->rstn, 1);
+ if (rc)
+ goto err_gpio_dir;
+
+ if (gpio_is_valid(lp->slp_tr)) {
+ rc = gpio_direction_output(lp->slp_tr, 0);
+ if (rc)
+ goto err_gpio_dir;
+ }
+
+ /* Reset */
+ msleep(1);
+ gpio_set_value(lp->rstn, 0);
+ msleep(1);
+ gpio_set_value(lp->rstn, 1);
+ msleep(1);
+
+ rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
+ if (rc)
+ goto err_gpio_dir;
+ rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
+ if (rc)
+ goto err_gpio_dir;
+
+ if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+ dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
+ man_id_1, man_id_0);
+ rc = -EINVAL;
+ goto err_gpio_dir;
+ }
+
+ rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
+ if (rc)
+ goto err_gpio_dir;
+
+ switch (lp->part) {
+ case 2:
+ chip = "at86rf230";
+ /* supported = 1; FIXME: should be easy to support; */
+ break;
+ case 3:
+ chip = "at86rf231";
+ supported = 1;
+ break;
+ default:
+ chip = "UNKNOWN";
+ break;
+ }
+
+ dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
+ if (!supported) {
+ rc = -ENOTSUPP;
+ goto err_gpio_dir;
+ }
+
+ rc = at86rf230_hw_init(lp);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED,
+ dev_name(&spi->dev), lp);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = ieee802154_register_device(lp->dev);
+ if (rc)
+ goto err_irq;
+
+ return rc;
+
+ ieee802154_unregister_device(lp->dev);
+err_irq:
+ free_irq(spi->irq, lp);
+ flush_work(&lp->irqwork);
+err_gpio_dir:
+ if (gpio_is_valid(lp->slp_tr))
+ gpio_free(lp->slp_tr);
+err_slp_tr:
+ gpio_free(lp->rstn);
+err_rstn:
+err_fill:
+ spi_set_drvdata(spi, NULL);
+ mutex_destroy(&lp->bmux);
+ ieee802154_free_device(lp->dev);
+ return rc;
+}
+
+static int __devexit at86rf230_remove(struct spi_device *spi)
+{
+ struct at86rf230_local *lp = spi_get_drvdata(spi);
+
+ ieee802154_unregister_device(lp->dev);
+
+ free_irq(spi->irq, lp);
+ flush_work(&lp->irqwork);
+
+ if (gpio_is_valid(lp->slp_tr))
+ gpio_free(lp->slp_tr);
+ gpio_free(lp->rstn);
+
+ spi_set_drvdata(spi, NULL);
+ mutex_destroy(&lp->bmux);
+ ieee802154_free_device(lp->dev);
+
+ dev_dbg(&spi->dev, "unregistered at86rf230\n");
+ return 0;
+}
+
+static struct spi_driver at86rf230_driver = {
+ .driver = {
+ .name = "at86rf230",
+ .owner = THIS_MODULE,
+ },
+ .probe = at86rf230_probe,
+ .remove = __devexit_p(at86rf230_remove),
+ .suspend = at86rf230_suspend,
+ .resume = at86rf230_resume,
+};
+
+static int __init at86rf230_init(void)
+{
+ return spi_register_driver(&at86rf230_driver);
+}
+module_init(at86rf230_init);
+
+static void __exit at86rf230_exit(void)
+{
+ spi_unregister_driver(&at86rf230_driver);
+}
+module_exit(at86rf230_exit);
+
+MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
+MODULE_LICENSE("GPL v2");
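
In the new at86rf230.c above, every SR_* define packs a register address, a bit mask and a shift into one macro, so a call like at86rf230_read_subreg(lp, SR_TRX_STATUS, &val) expands into the (addr, mask, shift) arguments, and at86rf230_write_subreg() performs a read-modify-write of just that sub-field over SPI. A rough sketch of the same convention, with read_reg()/write_reg() standing in for the SPI accessors (placeholders, not driver functions):

/* Sketch: an (addr, mask, shift) triplet describing a register sub-field. */
#include <stdint.h>

extern uint8_t read_reg(uint8_t addr);
extern void write_reg(uint8_t addr, uint8_t val);

#define SUBREG_TRX_CMD	0x02, 0x1f, 0	/* same shape as SR_TRX_CMD */

static void write_subreg(uint8_t addr, uint8_t mask, int shift, uint8_t data)
{
	uint8_t val = read_reg(addr);	/* read the whole register */

	val &= ~mask;			/* clear the sub-field */
	val |= (data << shift) & mask;	/* insert the new value */
	write_reg(addr, val);
}

static void set_tx_on(void)
{
	write_subreg(SUBREG_TRX_CMD, 0x09);	/* the macro expands into three arguments */
}
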
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 56eecef..2ec93da 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -8,8 +8,7 @@ menuconfig IIO
help
The industrial I/O subsystem provides a unified framework for
drivers for many different types of embedded sensors using a
- number of different physical interfaces (i2c, spi, etc). See
- Documentation/iio for more information.
+ number of different physical interfaces (i2c, spi, etc).
if IIO
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 1ddd886..4f947e4 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -661,7 +661,6 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
* New channel registration method - relies on the fact a group does
* not need to be initialized if its name is NULL.
*/
- INIT_LIST_HEAD(&indio_dev->channel_attr_list);
if (indio_dev->channels)
for (i = 0; i < indio_dev->num_channels; i++) {
ret = iio_device_add_channel_sysfs(indio_dev,
@@ -725,12 +724,16 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
static void iio_dev_release(struct device *device)
{
struct iio_dev *indio_dev = dev_to_iio_dev(device);
- cdev_del(&indio_dev->chrdev);
+ if (indio_dev->chrdev.dev)
+ cdev_del(&indio_dev->chrdev);
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
iio_device_unregister_trigger_consumer(indio_dev);
iio_device_unregister_eventset(indio_dev);
iio_device_unregister_sysfs(indio_dev);
iio_device_unregister_debugfs(indio_dev);
+
+ ida_simple_remove(&iio_ida, indio_dev->id);
+ kfree(indio_dev);
}
static struct device_type iio_dev_type = {
@@ -761,6 +764,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
dev_set_drvdata(&dev->dev, (void *)dev);
mutex_init(&dev->mlock);
mutex_init(&dev->info_exist_lock);
+ INIT_LIST_HEAD(&dev->channel_attr_list);
dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
if (dev->id < 0) {
@@ -778,10 +782,8 @@ EXPORT_SYMBOL(iio_device_alloc);
void iio_device_free(struct iio_dev *dev)
{
- if (dev) {
- ida_simple_remove(&iio_ida, dev->id);
- kfree(dev);
- }
+ if (dev)
+ put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);
@@ -902,7 +904,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
mutex_lock(&indio_dev->info_exist_lock);
indio_dev->info = NULL;
mutex_unlock(&indio_dev->info_exist_lock);
- device_unregister(&indio_dev->dev);
+ device_del(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
subsys_initcall(iio_init);
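
The industrialio-core changes above move final teardown into the struct device release callback: iio_device_unregister() now only does device_del(), and the last reference drop, via put_device() in iio_device_free(), runs iio_dev_release(), which deletes the chrdev, removes the ida id and frees the structure. A minimal sketch of that device_del()/put_device() split (generic names, not the IIO core):

/* Sketch: final teardown lives in the struct device release callback. */
#include <linux/device.h>
#include <linux/slab.h>

struct my_dev {
	struct device dev;
	/* ... driver state ... */
};

static void my_dev_release(struct device *dev)
{
	struct my_dev *md = container_of(dev, struct my_dev, dev);

	kfree(md);			/* runs only when the last reference is dropped */
}

/* my_dev_release() would be hooked up via dev.release (or a device_type) before device_add(). */

static void my_dev_unregister(struct my_dev *md)
{
	device_del(&md->dev);		/* remove from the system, keep the memory */
}

static void my_dev_free(struct my_dev *md)
{
	put_device(&md->dev);		/* may invoke my_dev_release() */
}
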
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 55d5642..2e826f9 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
- return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
+ return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
(ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
(id->qp_type == IB_QPT_UD)) ||
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index e497dfbe..3ae2bfd 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -108,12 +108,14 @@ void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
unsigned char *prev_tail;
prev_tail = skb_tail_pointer(skb);
- *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
- len, NLM_F_MULTI);
+ *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
+ len, NLM_F_MULTI);
+ if (!*nlh)
+ goto out_nlmsg_trim;
(*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
- return NLMSG_DATA(*nlh);
+ return nlmsg_data(*nlh);
-nlmsg_failure:
+out_nlmsg_trim:
nlmsg_trim(skb, prev_tail);
return NULL;
}
@@ -171,8 +173,11 @@ static void ibnl_rcv(struct sk_buff *skb)
int __init ibnl_init(void)
{
- nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv,
- NULL, THIS_MODULE);
+ struct netlink_kernel_cfg cfg = {
+ .input = ibnl_rcv,
+ };
+
+ nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg);
if (!nls) {
pr_warn("Failed to create netlink socket\n");
return -ENOMEM;
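
The netlink hunks above track two API changes: message headers are built with nlmsg_put(), which returns NULL on failure and must be checked, and netlink_kernel_create() now takes its callbacks through struct netlink_kernel_cfg. A compact sketch of the new calling convention (my_rcv is a made-up handler, not part of the RDMA code):

/* Sketch: creating a kernel netlink socket with struct netlink_kernel_cfg. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static struct sock *nls;

static void my_rcv(struct sk_buff *skb)
{
	/* dispatch incoming requests */
}

static int __init my_nl_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.input	= my_rcv,	/* the callback moves into the cfg structure */
	};

	nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg);
	return nls ? 0 : -ENOMEM;
}
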
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 740dcc0..77b6b18 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1374,7 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst, NULL);
+ l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1942,7 +1942,8 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail3;
}
ep->dst = &rt->dst;
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
+ ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
+ &cm_id->remote_addr.sin_addr.s_addr);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 55ab284e..b18870c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
struct net_device *pdev;
pdev = ip_dev_find(&init_net, peer_ip);
+ if (!pdev) {
+ err = -ENODEV;
+ goto out;
+ }
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
n, pdev, 0);
if (!ep->l2t)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ee1c577..8a3a203 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
- props->max_qp_wr = dev->dev->caps.max_wqes;
+ props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_sge = min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg);
props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
@@ -718,26 +718,53 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
return ret;
}
+struct mlx4_ib_steering {
+ struct list_head list;
+ u64 reg_id;
+ union ib_gid gid;
+};
+
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ u64 reg_id;
+ struct mlx4_ib_steering *ib_steering = NULL;
+
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
+ if (!ib_steering)
+ return -ENOMEM;
+ }
- err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
- !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
- MLX4_PROT_IB_IPV6);
+ err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
+ !!(mqp->flags &
+ MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+ MLX4_PROT_IB_IPV6, &reg_id);
if (err)
- return err;
+ goto err_malloc;
err = add_gid_entry(ibqp, gid);
if (err)
goto err_add;
+ if (ib_steering) {
+ memcpy(ib_steering->gid.raw, gid->raw, 16);
+ ib_steering->reg_id = reg_id;
+ mutex_lock(&mqp->mutex);
+ list_add(&ib_steering->list, &mqp->steering_rules);
+ mutex_unlock(&mqp->mutex);
+ }
return 0;
err_add:
- mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
+ mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ MLX4_PROT_IB_IPV6, reg_id);
+err_malloc:
+ kfree(ib_steering);
+
return err;
}
@@ -765,9 +792,30 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
u8 mac[6];
struct net_device *ndev;
struct mlx4_ib_gid_entry *ge;
+ u64 reg_id = 0;
+
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ struct mlx4_ib_steering *ib_steering;
- err = mlx4_multicast_detach(mdev->dev,
- &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
+ mutex_lock(&mqp->mutex);
+ list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
+ if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
+ list_del(&ib_steering->list);
+ break;
+ }
+ }
+ mutex_unlock(&mqp->mutex);
+ if (&ib_steering->list == &mqp->steering_rules) {
+ pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
+ return -EINVAL;
+ }
+ reg_id = ib_steering->reg_id;
+ kfree(ib_steering);
+ }
+
+ err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ MLX4_PROT_IB_IPV6, reg_id);
if (err)
return err;
@@ -1084,12 +1132,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
int total_eqs = 0;
int i, j, eq;
- /* Init eq table */
- ibdev->eq_table = NULL;
- ibdev->eq_added = 0;
-
- /* Legacy mode? */
- if (dev->caps.comp_pool == 0)
+ /* Legacy mode or comp_pool is not large enough */
+ if (dev->caps.comp_pool == 0 ||
+ dev->caps.num_ports > dev->caps.comp_pool)
return;
eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
@@ -1135,7 +1180,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
int i;
- int total_eqs;
+
+ /* no additional eqs were added */
+ if (!ibdev->eq_table)
+ return;
/* Reset the advertised EQ number */
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
@@ -1148,12 +1196,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
mlx4_release_eq(dev, ibdev->eq_table[i]);
}
- total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
- memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
kfree(ibdev->eq_table);
-
- ibdev->eq_table = NULL;
- ibdev->eq_added = 0;
}
static void *mlx4_ib_add(struct mlx4_dev *dev)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e62297c..42df4f7 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -44,6 +44,14 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+enum {
+ MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
+ MLX4_IB_MAX_HEADROOM = 2048
+};
+
+#define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
+#define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
+
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
@@ -155,6 +163,7 @@ struct mlx4_ib_qp {
u8 state;
int mlx_type;
struct list_head gid_list;
+ struct list_head steering_rules;
};
struct mlx4_ib_srq {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ceb3332..6af19f6 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
/* Sanity check RQ size before proceeding */
- if (cap->max_recv_wr > dev->dev->caps.max_wqes ||
- cap->max_recv_sge > dev->dev->caps.max_rq_sg)
+ if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
+ cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
return -EINVAL;
if (!has_rq) {
@@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
}
- cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
- cap->max_recv_sge = qp->rq.max_gs;
+ /* leave userspace return values as they were, so as not to break ABI */
+ if (is_user) {
+ cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
+ cap->max_recv_sge = qp->rq.max_gs;
+ } else {
+ cap->max_recv_wr = qp->rq.max_post =
+ min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
+ cap->max_recv_sge = min(qp->rq.max_gs,
+ min(dev->dev->caps.max_sq_sg,
+ dev->dev->caps.max_rq_sg));
+ }
return 0;
}
@@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
int s;
/* Sanity check SQ size before proceeding */
- if (cap->max_send_wr > dev->dev->caps.max_wqes ||
- cap->max_send_sge > dev->dev->caps.max_sq_sg ||
+ if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
+ cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
return -EINVAL;
@@ -486,6 +495,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
INIT_LIST_HEAD(&qp->gid_list);
+ INIT_LIST_HEAD(&qp->steering_rules);
qp->state = IB_QPS_RESET;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 85a69c9..48970af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -61,6 +61,7 @@ struct ocrdma_dev_attr {
u32 max_inline_data;
int max_send_sge;
int max_recv_sge;
+ int max_srq_sge;
int max_mr;
u64 max_mr_size;
u32 max_num_mr_pbl;
@@ -231,7 +232,6 @@ struct ocrdma_qp_hwq_info {
u32 entry_size;
u32 max_cnt;
u32 max_wqe_idx;
- u32 free_delta;
u16 dbid; /* qid, where to ring the doorbell. */
u32 len;
dma_addr_t pa;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index a411a4e..517ab20 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp {
u32 rsvd1;
u32 num_wqe_allocated;
u32 num_rqe_allocated;
- u32 free_wqe_delta;
- u32 free_rqe_delta;
u32 db_sq_offset;
u32 db_rq_offset;
u32 db_shift;
@@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp {
u32 db_rq_offset;
u32 db_shift;
- u32 free_rqe_delta;
- u32 rsvd2;
+ u64 rsvd2;
u64 rsvd3;
} __packed;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 9b204b1..71942af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
break;
case OCRDMA_SRQ_LIMIT_EVENT:
ib_evt.element.srq = &qp->srq->ibsrq;
- ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
srq_event = 1;
qp_event = 0;
break;
@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
struct ocrdma_dev_attr *attr,
struct ocrdma_mbx_query_config *rsp)
{
- int max_q_mem;
-
attr->max_pd =
(rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_recv_sge = (rsp->max_write_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+ attr->max_srq_sge = (rsp->max_srq_rqe_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_inline_data =
attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
sizeof(struct ocrdma_sge));
- max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
- /* hw can queue one less then the configured size,
- * so publish less by one to stack.
- */
if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
attr->ird = 1;
attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
- } else
- dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
- dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
+ }
+ dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
+ OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
+ dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
}
static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
@@ -1990,19 +1988,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
max_wqe_allocated = 1 << max_wqe_allocated;
max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
- if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- qp->sq.free_delta = 0;
- qp->rq.free_delta = 1;
- } else
- qp->sq.free_delta = 1;
-
qp->sq.max_cnt = max_wqe_allocated;
qp->sq.max_wqe_idx = max_wqe_allocated - 1;
if (!attrs->srq) {
qp->rq.max_cnt = max_rqe_allocated;
qp->rq.max_wqe_idx = max_rqe_allocated - 1;
- qp->rq.free_delta = 1;
}
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index a20d16e..b050e62 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -26,7 +26,6 @@
*******************************************************************/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/idr.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -98,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
sgid->raw[15] = mac_addr[5];
}
-static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
bool is_vlan, u16 vlan_id)
{
int i;
- bool found = false;
union ib_gid new_sgid;
- int free_idx = OCRDMA_MAX_SGID;
unsigned long flags;
memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -116,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
sizeof(union ib_gid))) {
/* found free entry */
- if (!found) {
- free_idx = i;
- found = true;
- break;
- }
+ memcpy(&dev->sgid_tbl[i], &new_sgid,
+ sizeof(union ib_gid));
+ spin_unlock_irqrestore(&dev->sgid_lock, flags);
+ return true;
} else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
sizeof(union ib_gid))) {
/* entry already present, no addition is required. */
spin_unlock_irqrestore(&dev->sgid_lock, flags);
- return;
+ return false;
}
}
- /* if entry doesn't exist and if table has some space, add entry */
- if (found)
- memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
- sizeof(union ib_gid));
spin_unlock_irqrestore(&dev->sgid_lock, flags);
+ return false;
}
static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -168,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
ocrdma_get_guid(dev, &sgid->raw[8]);
}
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
{
struct net_device *netdev, *tmp;
u16 vlan_id;
@@ -176,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
netdev = dev->nic_info.netdev;
- ocrdma_add_default_sgid(dev);
-
rcu_read_lock();
for_each_netdev_rcu(&init_net, tmp) {
if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -195,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
}
}
rcu_read_unlock();
+}
+#else
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
+{
+
+}
+#endif /* VLAN */
+
+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+{
+ ocrdma_add_default_sgid(dev);
+ ocrdma_add_vlan_sgids(dev);
return 0;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
+defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
static int ocrdma_inet6addr_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
@@ -209,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
struct ib_event gid_event;
struct ocrdma_dev *dev;
bool found = false;
+ bool updated = false;
bool is_vlan = false;
u16 vid = 0;
@@ -234,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
mutex_lock(&dev->dev_lock);
switch (event) {
case NETDEV_UP:
- ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+ updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
break;
case NETDEV_DOWN:
- found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
- if (found) {
- /* found the matching entry, notify
- * the consumers about it
- */
- gid_event.device = &dev->ibdev;
- gid_event.element.port_num = 1;
- gid_event.event = IB_EVENT_GID_CHANGE;
- ib_dispatch_event(&gid_event);
- }
+ updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
break;
default:
break;
}
+ if (updated) {
+ /* GID table updated, notify the consumers about it */
+ gid_event.device = &dev->ibdev;
+ gid_event.element.port_num = 1;
+ gid_event.event = IB_EVENT_GID_CHANGE;
+ ib_dispatch_event(&gid_event);
+ }
mutex_unlock(&dev->dev_lock);
return NOTIFY_OK;
}
@@ -259,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = {
.notifier_call = ocrdma_inet6addr_event
};
-#endif /* IPV6 */
+#endif /* IPV6 and VLAN */
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
u8 port_num)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 7fd80cc..c75cbdf 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -418,6 +418,9 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -458,7 +461,7 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0,
OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF <<
- OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16,
OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF <<
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e9f74d1..2e2e7ae 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
dev = get_ocrdma_dev(ibdev);
memset(sgid, 0, sizeof(*sgid));
- if (index > OCRDMA_MAX_SGID)
+ if (index >= OCRDMA_MAX_SGID)
return -EINVAL;
memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY;
- attr->max_sge = dev->attr.max_send_sge;
- attr->max_sge_rd = dev->attr.max_send_sge;
+ attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
+ attr->max_sge_rd = 0;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
attr->max_srq = (dev->attr.max_qp - 1);
- attr->max_srq_sge = attr->max_sge;
+	attr->max_srq_sge = dev->attr.max_srq_sge;
attr->max_srq_wr = dev->attr.max_rqe;
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
attr->max_fast_reg_page_list_len = 0;
@@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
uresp.db_shift = 16;
}
- uresp.free_wqe_delta = qp->sq.free_delta;
- uresp.free_rqe_delta = qp->rq.free_delta;
if (qp->dpp_enabled) {
uresp.dpp_credit = dpp_credit_lmt;
@@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
free_cnt = (q->max_cnt - q->head) + q->tail;
else
free_cnt = q->tail - q->head;
- if (q->free_delta)
- free_cnt -= q->free_delta;
return free_cnt;
}
@@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
(srq->pd->id * srq->dev->nic_info.db_page_size);
uresp.db_page_size = srq->dev->nic_info.db_page_size;
uresp.num_rqe_allocated = srq->rq.max_cnt;
- uresp.free_rqe_delta = 1;
if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
uresp.db_shift = 24;
@@ -2306,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
*stop = true;
expand = false;
}
- } else
+ } else {
+ *polled = true;
expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+ }
return expand;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index e648343..633f03d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -28,7 +28,6 @@
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__
-#include <linux/version.h>
int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
struct ib_send_wr **bad_wr);
int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5c1bc99..f10221f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
skb_frag_size_set(frag, size);
skb->data_len += size;
- skb->truesize += size;
+ skb->truesize += PAGE_SIZE;
} else
skb_put(skb, length);
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
int buf_size;
+ int tailroom;
u64 *mapping;
- if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
buf_size = IPOIB_UD_HEAD_SIZE;
- else
+ tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+ } else {
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ tailroom = 0;
+ }
- skb = dev_alloc_skb(buf_size + 4);
+ skb = dev_alloc_skb(buf_size + tailroom + 4);
if (unlikely(!skb))
return NULL;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3974c29..bbee4b2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -715,7 +715,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
if (likely(skb_dst(skb))) {
- n = dst_get_neighbour_noref(skb_dst(skb));
+ n = dst_neigh_lookup_skb(skb_dst(skb), skb);
if (!n) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
@@ -797,6 +797,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
unlock:
+ if (n)
+ neigh_release(n);
rcu_read_unlock();
return NETDEV_TX_OK;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 20ebc6f..7cecb16 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -658,9 +658,15 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct dst_entry *dst = skb_dst(skb);
struct ipoib_mcast *mcast;
+ struct neighbour *n;
unsigned long flags;
+ n = NULL;
+ if (dst)
+ n = dst_neigh_lookup_skb(dst, skb);
+
spin_lock_irqsave(&priv->lock, flags);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
@@ -715,29 +721,28 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
out:
if (mcast && mcast->ah) {
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n = NULL;
-
- rcu_read_lock();
- if (dst)
- n = dst_get_neighbour_noref(dst);
- if (n && !*to_ipoib_neigh(n)) {
- struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
- skb->dev);
-
- if (neigh) {
- kref_get(&mcast->ah->ref);
- neigh->ah = mcast->ah;
- list_add_tail(&neigh->list, &mcast->neigh_list);
+ if (n) {
+ if (!*to_ipoib_neigh(n)) {
+ struct ipoib_neigh *neigh;
+
+ neigh = ipoib_neigh_alloc(n, skb->dev);
+ if (neigh) {
+ kref_get(&mcast->ah->ref);
+ neigh->ah = mcast->ah;
+ list_add_tail(&neigh->list,
+ &mcast->neigh_list);
+ }
}
+ neigh_release(n);
}
- rcu_read_unlock();
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
return;
}
unlock:
+ if (n)
+ neigh_release(n);
spin_unlock_irqrestore(&priv->lock, flags);
}
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d90a421..a2e418c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
struct amd_iommu_fault fault;
- volatile u64 *raw;
- int i;
INC_STATS_COUNTER(pri_requests);
- raw = (u64 *)(iommu->ppr_log + head);
-
- /*
- * Hardware bug: Interrupt may arrive before the entry is written to
- * memory. If this happens we need to wait for the entry to arrive.
- */
- for (i = 0; i < LOOP_TIMEOUT; ++i) {
- if (PPR_REQ_TYPE(raw[0]) != 0)
- break;
- udelay(1);
- }
-
if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
return;
@@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
fault.tag = PPR_TAG(raw[0]);
fault.flags = PPR_FLAGS(raw[0]);
- /*
- * To detect the hardware bug we need to clear the entry
- * to back to zero.
- */
- raw[0] = raw[1] = 0;
-
atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}
@@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
if (iommu->ppr_log == NULL)
return;
+ /* enable ppr interrupts again */
+ writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
spin_lock_irqsave(&iommu->lock, flags);
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
while (head != tail) {
+ volatile u64 *raw;
+ u64 entry[2];
+ int i;
- /* Handle PPR entry */
- iommu_handle_ppr_entry(iommu, head);
+ raw = (u64 *)(iommu->ppr_log + head);
+
+ /*
+ * Hardware bug: Interrupt may arrive before the entry is
+ * written to memory. If this happens we need to wait for the
+ * entry to arrive.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ if (PPR_REQ_TYPE(raw[0]) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* Avoid memcpy function-call overhead */
+ entry[0] = raw[0];
+ entry[1] = raw[1];
- /* Update and refresh ring-buffer state*/
+ /*
+ * To detect the hardware bug we need to clear the entry
+ * back to zero.
+ */
+ raw[0] = raw[1] = 0UL;
+
+ /* Update head pointer of hardware ring-buffer */
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+ /*
+ * Release iommu->lock because ppr-handling might need to
+	 * re-acquire it
+ */
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ /* Handle PPR entry */
+ iommu_handle_ppr_entry(iommu, entry);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ /* Refresh ring-buffer information */
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
}
- /* enable ppr interrupts again */
- writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
spin_unlock_irqrestore(&iommu->lock, flags);
}
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index c567903..542024b 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->dev)
return 1;
+ iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+ PCI_DEVFN(0, 0));
+
iommu->cap_ptr = h->cap_ptr;
iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
int i, j;
u32 ioc_feature_control;
- struct pci_dev *pdev = NULL;
+ struct pci_dev *pdev = iommu->root_pdev;
/* RD890 BIOSes may not have completely reconfigured the iommu */
- if (!is_rd890_iommu(iommu->dev))
+ if (!is_rd890_iommu(iommu->dev) || !pdev)
return;
/*
* First, we need to ensure that the iommu is enabled. This is
* controlled by a register in the northbridge
*/
- pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-
- if (!pdev)
- return;
/* Select Northbridge indirect register 0x75 and enable writing */
pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
if (!(ioc_feature_control & 0x1))
pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
- pci_dev_put(pdev);
-
/* Restore the iommu BAR */
pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
iommu->stored_addr_lo);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 2452f3b..2435555 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -481,6 +481,9 @@ struct amd_iommu {
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
+ /* Cache pdev to root device for resume quirks */
+ struct pci_dev *root_pdev;
+
/* physical address of MMIO space */
u64 mmio_phys;
/* virtual address of MMIO space */
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 1a0ae44..5f21f629 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
skb = NULL;
else if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
- "%s ch%d mgr prim(%x) addr(%x) err %d\n",
- __func__, ch->nr, hh->prim, ch->addr, ret);
+ "%s mgr prim(%x) err %d\n",
+ __func__, hh->prim, ret);
}
out:
mutex_unlock(&st->lmutex);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 04cb8c8..12b2b55 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -379,7 +379,7 @@ config LEDS_NETXBIG
config LEDS_ASIC3
bool "LED support for the HTC ASIC3"
- depends on LEDS_CLASS
+ depends on LEDS_CLASS=y
depends on MFD_ASIC3
default y
help
@@ -390,7 +390,7 @@ config LEDS_ASIC3
config LEDS_RENESAS_TPU
bool "LED support for Renesas TPU"
- depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO
+ depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
help
This option enables build of the LED TPU platform driver,
suitable to drive any TPU channel on newer Renesas SoCs.
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 8ee92c8..e663e6f 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev)
led_cdev->brightness = led_cdev->brightness_get(led_cdev);
}
-static ssize_t led_brightness_show(struct device *dev,
+static ssize_t led_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index d686004..d65353d 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
if (!led_cdev->blink_brightness)
led_cdev->blink_brightness = led_cdev->max_brightness;
- if (led_get_trigger_data(led_cdev) &&
- delay_on == led_cdev->blink_delay_on &&
- delay_off == led_cdev->blink_delay_off)
- return;
-
- led_stop_software_blink(led_cdev);
-
led_cdev->blink_delay_on = delay_on;
led_cdev->blink_delay_off = delay_off;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 37fdaf8..ce59824 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2292,6 +2292,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
if (r)
return r;
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+ __func__, r);
+ return r;
+ }
+
r = dm_pool_reserve_metadata_snap(pool->pmd);
if (r)
DMWARN("reserve_metadata_snap message failed.");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1c2f904..a4c219e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5784,8 +5784,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
- (!test_bit(In_sync, &rdev->flags) ||
- rdev->raid_disk != info->raid_disk)) {
+ rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but events doesn't
* match, so reject it.
*/
@@ -6751,7 +6750,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
- name ?: mddev->pers->name);
+ name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
@@ -7298,6 +7297,7 @@ void md_do_sync(struct mddev *mddev)
int skipped = 0;
struct md_rdev *rdev;
char *desc;
+ struct blk_plug plug;
/* just in case thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7447,7 @@ void md_do_sync(struct mddev *mddev)
}
mddev->curr_resync_completed = j;
+ blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
@@ -7552,6 +7553,7 @@ void md_do_sync(struct mddev *mddev)
* this also signals 'finished resyncing' to md_stop
*/
out:
+ blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 9339e67..61a1833 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
}
{
- mddev->thread = md_register_thread(multipathd, mddev, NULL);
+ mddev->thread = md_register_thread(multipathd, mddev,
+ "multipath");
if (!mddev->thread) {
printk(KERN_ERR "multipath: couldn't allocate thread"
" for %s\n", mdname(mddev));
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
index 50ed53b..fc90c11 100644
--- a/drivers/md/persistent-data/dm-space-map-checker.c
+++ b/drivers/md/persistent-data/dm-space-map-checker.c
@@ -8,6 +8,7 @@
#include <linux/device-mapper.h>
#include <linux/export.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_DM_DEBUG_SPACE_MAPS
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
ca->nr = nr_blocks;
ca->nr_free = nr_blocks;
- ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
- if (!ca->counts)
- return -ENOMEM;
+
+ if (!nr_blocks)
+ ca->counts = NULL;
+ else {
+ ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+ if (!ca->counts)
+ return -ENOMEM;
+ }
return 0;
}
+static void ca_destroy(struct count_array *ca)
+{
+ vfree(ca->counts);
+}
+
static int ca_load(struct count_array *ca, struct dm_space_map *sm)
{
int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
{
dm_block_t nr_blocks = ca->nr + extra_blocks;
- uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+ uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
if (!counts)
return -ENOMEM;
- memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
- kfree(ca->counts);
+ if (ca->counts) {
+ memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+ ca_destroy(ca);
+ }
ca->nr = nr_blocks;
ca->nr_free += extra_blocks;
ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
return 0;
}
-static void ca_destroy(struct count_array *ca)
-{
- kfree(ca->counts);
-}
-
/*----------------------------------------------------------------*/
struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
int r;
struct sm_checker *smc;
- if (!sm)
- return NULL;
+ if (IS_ERR_OR_NULL(sm))
+ return ERR_PTR(-EINVAL);
smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
int r;
struct sm_checker *smc;
- if (!sm)
- return NULL;
+ if (IS_ERR_OR_NULL(sm))
+ return ERR_PTR(-EINVAL);
smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
smc->real_sm = sm;
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index fc469ba..3d0ed53 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
dm_block_t nr_blocks)
{
struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
- return dm_sm_checker_create_fresh(sm);
+ struct dm_space_map *smc;
+
+ if (IS_ERR_OR_NULL(sm))
+ return sm;
+
+ smc = dm_sm_checker_create_fresh(sm);
+ if (IS_ERR(smc))
+ dm_sm_destroy(sm);
+
+ return smc;
}
EXPORT_SYMBOL_GPL(dm_sm_disk_create);
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 400fe14..e5604b3 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
void dm_tm_destroy(struct dm_transaction_manager *tm)
{
+ if (!tm->is_clone)
+ wipe_shadow_table(tm);
+
kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
}
*sm = dm_sm_checker_create(inner);
- if (!*sm)
+ if (IS_ERR(*sm)) {
+ r = PTR_ERR(*sm);
goto bad2;
+ }
} else {
r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
}
*sm = dm_sm_checker_create(inner);
- if (!*sm)
+ if (IS_ERR(*sm)) {
+ r = PTR_ERR(*sm);
goto bad2;
+ }
}
return 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 835de71..8c2754f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
int bad_sectors;
int disk = start_disk + i;
- if (disk >= conf->raid_disks)
- disk -= conf->raid_disks;
+ if (disk >= conf->raid_disks * 2)
+ disk -= conf->raid_disks * 2;
rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
- int plugged;
int first_clone;
int sectors_handled;
int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
* the bad blocks. Each set of writes gets its own r1bio
* with a set of bios attached.
*/
- plugged = mddev_check_plugged(mddev);
disks = conf->raid_disks * 2;
retry_write:
@@ -1191,6 +1189,8 @@ read_again:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
/* Mustn't call r1_bio_write_done before this next test,
* as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
-
- if (do_sync || !bitmap || !plugged)
- md_wakeup_thread(mddev->thread);
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2550,6 +2547,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -EINVAL;
spin_lock_init(&conf->device_lock);
rdev_for_each(rdev, mddev) {
+ struct request_queue *q;
int disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
@@ -2562,6 +2560,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (disk->rdev)
goto abort;
disk->rdev = rdev;
+ q = bdev_get_queue(rdev->bdev);
+ if (q->merge_bvec_fn)
+ mddev->merge_check_needed = 1;
disk->head_position = 0;
}
@@ -2617,7 +2618,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
goto abort;
}
err = -ENOMEM;
- conf->thread = md_register_thread(raid1d, mddev, NULL);
+ conf->thread = md_register_thread(raid1d, mddev, "raid1");
if (!conf->thread) {
printk(KERN_ERR
"md/raid1:%s: couldn't allocate thread\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 987db37..8da6282 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
- int plugged;
int sectors_handled;
int max_sectors;
int sectors;
@@ -1239,7 +1238,6 @@ read_again:
* of r10_bios is recorded in bio->bi_phys_segments just as with
* the read case.
*/
- plugged = mddev_check_plugged(mddev);
r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
if (!r10_bio->devs[i].repl_bio)
continue;
@@ -1423,6 +1423,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
-
- if (do_sync || !mddev->bitmap || !plugged)
- md_wakeup_thread(mddev->thread);
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s<<9, conf->tmppage, WRITE)
+ s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s<<9, conf->tmppage,
+ s, conf->tmppage,
READ)) {
case 0:
/* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
slot = r10_bio->read_slot;
printk_ratelimited(
KERN_ERR
- "md/raid10:%s: %s: redirecting"
+ "md/raid10:%s: %s: redirecting "
"sector %llu to another mirror\n",
mdname(mddev),
bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
blk_start_plug(&plug);
for (;;) {
- flush_pending_writes(conf);
+ if (atomic_read(&mddev->plug_cnt) == 0)
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
+ if (sect >= mddev->resync_max_sectors) {
+ /* last stripe is not complete - don't
+ * try to recover this sector.
+ */
+ continue;
+ }
/* Unless we are doing a full sync, or a replacement
* we only need to recover the block if it is set in
* the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
- conf->thread = md_register_thread(raid10d, mddev, NULL);
+ conf->thread = md_register_thread(raid10d, mddev, "raid10");
if (!conf->thread)
goto out;
@@ -3475,6 +3481,7 @@ static int run(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
long long diff;
+ struct request_queue *q;
disk_idx = rdev->raid_disk;
if (disk_idx < 0)
@@ -3493,6 +3500,9 @@ static int run(struct mddev *mddev)
goto out_free_conf;
disk->rdev = rdev;
}
+ q = bdev_get_queue(rdev->bdev);
+ if (q->merge_bvec_fn)
+ mddev->merge_check_needed = 1;
diff = (rdev->new_data_offset - rdev->data_offset);
if (!mddev->reshape_backwards)
diff = -diff;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d267672..04348d7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
- if (test_bit(STRIPE_DELAYED, &sh->state))
+ if (test_bit(STRIPE_DELAYED, &sh->state) &&
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
sh->bm_seq - conf->seq_write > 0)
list_add_tail(&sh->lru, &conf->bitmap_list);
else {
+ clear_bit(STRIPE_DELAYED, &sh->state);
clear_bit(STRIPE_BIT_DELAY, &sh->state);
list_add_tail(&sh->lru, &conf->handle_list);
}
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
* a chance*/
md_check_recovery(conf->mddev);
}
+ /*
+ * Because md_wait_for_blocked_rdev
+ * will dec nr_pending, we must
+ * increment it first.
+ */
+ atomic_inc(&rdev->nr_pending);
md_wait_for_blocked_rdev(rdev, conf->mddev);
} else {
/* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
} else {
const char *bdn = bdevname(rdev->bdev, b);
int retry = 0;
+ int set_bad = 0;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev),
(unsigned long long)s,
bdn);
- else if (conf->mddev->degraded >= conf->max_degraded)
+ else if (conf->mddev->degraded >= conf->max_degraded) {
+ set_bad = 1;
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev),
(unsigned long long)s,
bdn);
- else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+ } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
+ set_bad = 1;
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev),
(unsigned long long)s,
bdn);
- else if (atomic_read(&rdev->read_errors)
+ } else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
"md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
else {
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
- md_error(conf->mddev, rdev);
+ if (!(set_bad
+ && test_bit(In_sync, &rdev->flags)
+ && rdev_set_badblocks(
+ rdev, sh->sector, STRIPE_SECTORS, 0)))
+ md_error(conf->mddev, rdev);
}
}
rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
finish:
/* wait for this device to become unblocked */
- if (conf->mddev->external && unlikely(s.blocked_rdev))
- md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+ if (unlikely(s.blocked_rdev)) {
+ if (conf->mddev->external)
+ md_wait_for_blocked_rdev(s.blocked_rdev,
+ conf->mddev);
+ else
+ /* Internal metadata will immediately
+ * be written by raid5d, so we don't
+ * need to wait here.
+ */
+ rdev_dec_pending(s.blocked_rdev,
+ conf->mddev);
+ }
if (s.handle_bad_blocks)
for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
raid_bio->bi_next = (void*)rdev;
align_bi->bi_bdev = rdev->bdev;
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
- /* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_sector += rdev->data_offset;
if (!bio_fits_rdev(align_bi) ||
is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
return 0;
}
+ /* No reshape active, so we can trust rdev->data_offset */
+ align_bi->bi_sector += rdev->data_offset;
+
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
int remaining;
- int plugged;
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
- plugged = mddev_check_plugged(mddev);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ((bi->bi_rw & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
+ mddev_check_plugged(mddev);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
finish_wait(&conf->wait_for_overlap, &w);
break;
}
-
}
- if (!plugged)
- md_wakeup_thread(mddev->thread);
spin_lock_irq(&conf->device_lock);
remaining = raid5_dec_bi_phys_segments(bi);
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
int raid_disk, memory, max_disks;
struct md_rdev *rdev;
struct disk_info *disk;
+ char pers_name[6];
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
mdname(mddev), memory);
- conf->thread = md_register_thread(raid5d, mddev, NULL);
+ sprintf(pers_name, "raid%d", mddev->new_level);
+ conf->thread = md_register_thread(raid5d, mddev, pers_name);
if (!conf->thread) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
- disk = rdev->saved_raid_disk;
- else
- disk = first;
- for ( ; disk <= last ; disk++) {
+ first = rdev->saved_raid_disk;
+
+ for (disk = first; disk <= last; disk++) {
p = conf->disks + disk;
if (p->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
rcu_assign_pointer(p->rdev, rdev);
- break;
+ goto out;
}
+ }
+ for (disk = first; disk <= last; disk++) {
+ p = conf->disks + disk;
if (test_bit(WantReplacement, &p->rdev->flags) &&
p->replacement == NULL) {
clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
}
+out:
print_raid5_conf(conf);
return err;
}
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 7d42c11..0cdbd74 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -198,7 +198,6 @@ static int fops_open(struct file *file)
struct saa7146_dev *dev = video_drvdata(file);
struct saa7146_fh *fh = NULL;
int result = 0;
- enum v4l2_buf_type type;
DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
@@ -207,10 +206,6 @@ static int fops_open(struct file *file)
DEB_D("using: %p\n", dev);
- type = vdev->vfl_type == VFL_TYPE_GRABBER
- ? V4L2_BUF_TYPE_VIDEO_CAPTURE
- : V4L2_BUF_TYPE_VBI_CAPTURE;
-
/* check if an extension is registered */
if( NULL == dev->ext ) {
DEB_S("no extension registered for this device\n");
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index 98ecaf0..3180f5b 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -516,9 +516,9 @@ static int cx24110_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
if(cx24110_readreg(state,0x10)&0x40) {
/* the RS error counter has finished one counting window */
cx24110_writereg(state,0x10,0x60); /* select the byer reg */
- cx24110_readreg(state, 0x12) |
+ (void)(cx24110_readreg(state, 0x12) |
(cx24110_readreg(state, 0x13) << 8) |
- (cx24110_readreg(state, 0x14) << 16);
+ (cx24110_readreg(state, 0x14) << 16));
cx24110_writereg(state,0x10,0x70); /* select the bler reg */
state->lastbler=cx24110_readreg(state,0x12)|
(cx24110_readreg(state,0x13)<<8)|
diff --git a/drivers/media/dvb/frontends/cxd2820r_c.c b/drivers/media/dvb/frontends/cxd2820r_c.c
index 9454049..ed3b0ba6 100644
--- a/drivers/media/dvb/frontends/cxd2820r_c.c
+++ b/drivers/media/dvb/frontends/cxd2820r_c.c
@@ -121,7 +121,7 @@ int cxd2820r_get_frontend_c(struct dvb_frontend *fe)
if (ret)
goto error;
- switch ((buf[0] >> 0) & 0x03) {
+ switch ((buf[0] >> 0) & 0x07) {
case 0:
c->modulation = QAM_16;
break;
diff --git a/drivers/media/dvb/frontends/lg2160.c b/drivers/media/dvb/frontends/lg2160.c
index a3ab1a5..cc11260 100644
--- a/drivers/media/dvb/frontends/lg2160.c
+++ b/drivers/media/dvb/frontends/lg2160.c
@@ -126,7 +126,7 @@ static int lg216x_write_regs(struct lg216x_state *state,
lg_reg("writing %d registers...\n", len);
- for (i = 0; i < len - 1; i++) {
+ for (i = 0; i < len; i++) {
ret = lg216x_write_reg(state, regs[i].reg, regs[i].val);
if (lg_fail(ret))
return ret;
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 63c004a..664e460 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -544,6 +544,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0xc0a0),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xf5a0),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ } /* Terminating entry */
};
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 740a3d5..b415211 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -157,7 +157,7 @@ static int __devinit maxiradio_probe(struct pci_dev *pdev, const struct pci_devi
goto err_out_free_region;
dev->io = pci_resource_start(pdev, 0);
- if (snd_tea575x_init(&dev->tea)) {
+ if (snd_tea575x_init(&dev->tea, THIS_MODULE)) {
printk(KERN_ERR "radio-maxiradio: Unable to detect TEA575x tuner\n");
goto err_out_free_region;
}
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 52b8011..4efcbec 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -238,7 +238,7 @@ static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "%s:%s",
fmr2->is_fmd2 ? "PnP" : "ISA", dev_name(pdev));
- if (snd_tea575x_init(&fmr2->tea)) {
+ if (snd_tea575x_init(&fmr2->tea, THIS_MODULE)) {
printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
release_region(fmr2->io, 2);
return -ENODEV;
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index e9f6387..f412f7a 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -51,6 +51,8 @@ static struct usb_device_id si470x_usb_driver_id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) },
/* Sanei Electric, Inc. FM USB Radio (sold as DealExtreme.com PCear) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x10c5, 0x819a, USB_CLASS_HID, 0, 0) },
+ /* Axentia ALERT FM USB Receiver */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x12cf, 0x7111, USB_CLASS_HID, 0, 0) },
/* Terminating entry */
{ }
};
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index ff2933a..856ab96 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -371,7 +371,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -384,7 +383,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -398,7 +396,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 4, 0, 2, 3 },
.gpiomute = 1,
.no_msp34xx = 1,
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -414,7 +411,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -427,7 +423,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0, 1, 0, 1 },
.gpiomute = 3,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -440,7 +435,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x0f,
.gpiomux = { 0x0c, 0x04, 0x08, 0x04 },
/* 0x04 for some cards ?? */
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.audio_mode_gpio= avermedia_tvphone_audio,
@@ -454,7 +448,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -469,7 +462,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0xc00, 0x800, 0x400 },
.gpiomute = 0xc00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -482,7 +474,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 3,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 1, 1, 2, 3 },
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
@@ -496,7 +487,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 0, 1, 1),
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -510,7 +500,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20001,0x10001, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -524,7 +513,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 15,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 13, 14, 11, 7 },
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -536,7 +524,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 15,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 13, 14, 11, 7 },
- .needs_tvaudio = 1,
.msp34xx_alt = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -553,7 +540,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 2, 1, 3 }, /* old: {0, 1, 2, 3, 4} */
.gpiomute = 4,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -567,7 +553,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 1, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -583,7 +568,6 @@ struct tvcard bttv_tvcards[] = {
/* 2003-10-20 by "Anton A. Arapov" <arapov@mail.ru> */
.gpiomux = { 0x001e00, 0, 0x018000, 0x014000 },
.gpiomute = 0x002000,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -597,7 +581,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1, 0),
.gpiomux = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 },
.gpiomute = 0xcfa007,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.volume_gpio = winview_volume,
@@ -611,7 +594,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 1, 0, 0, 0 },
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -660,7 +642,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 0x800, 0x400 },
.gpiomute = 0xc00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -691,7 +672,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = {0x400, 0x400, 0x400, 0x400 },
.gpiomute = 0xc00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -706,7 +686,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20000, 0x30000, 0x10000, 0 },
.gpiomute = 0x40000,
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.audio_mode_gpio= terratv_audio,
@@ -720,7 +699,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 0, 1, 1),
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -748,7 +726,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20000, 0x30000, 0x10000, 0x00000 },
.gpiomute = 0x40000,
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.audio_mode_gpio= terratv_audio,
@@ -793,7 +770,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.muxsel_hook = PXC200_muxsel,
@@ -834,7 +810,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -847,7 +822,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x500, 0, 0x300, 0x900 },
.gpiomute = 0x900,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -874,7 +848,6 @@ struct tvcard bttv_tvcards[] = {
Note: There exists another variant "Winfast 2000" with tv stereo !?
Note: eeprom only contains FF and pci subsystem id 107d:6606
*/
- .needs_tvaudio = 0,
.pll = PLL_28,
.has_radio = 1,
.tuner_type = TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */
@@ -934,7 +907,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0x551400, 0x551200, 0, 0 },
.gpiomute = 0x551c00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
@@ -949,7 +921,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0xd0001, 0, 0 },
.gpiomute = 1,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -966,7 +937,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 4, 0, 2, 3 },
.gpiomute = 1,
.no_msp34xx = 1,
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -980,7 +950,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 15,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 13, 4, 11, 7 },
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -995,7 +964,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 0, 0},
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
@@ -1066,7 +1034,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20000, 0x30000, 0x10000, 0 },
.gpiomute = 0x40000,
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_35,
.tuner_type = TUNER_PHILIPS_PAL_I,
@@ -1084,7 +1051,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = {2,0,0,0 },
.gpiomute = 1,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -1163,7 +1129,6 @@ struct tvcard bttv_tvcards[] = {
MUX2 (mask 0x30000):
0,2,3= from MSP34xx
1= FM stereo Radio from Tuner */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -1179,7 +1144,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 0x10, 8 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1218,7 +1182,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1250,7 +1213,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(3, 1),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_35,
.tuner_type = TUNER_ABSENT,
@@ -1266,7 +1228,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x400, 0x400, 0x400, 0x400 },
.gpiomute = 0x800,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_4036FY5_NTSC,
.tuner_addr = ADDR_UNSET,
@@ -1312,7 +1273,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2),
.gpiomux = { },
.no_msp34xx = 1,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -1329,7 +1289,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 1, 0, 4, 4 },
.gpiomute = 9,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1379,7 +1338,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x1800,
.audio_mode_gpio= fv2000s_audio,
.no_msp34xx = 1,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1393,7 +1351,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x500, 0x500, 0x300, 0x900 },
.gpiomute = 0x900,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1477,7 +1434,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 11, 7 }, /* TV and Radio with same GPIO ! */
.gpiomute = 13,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_LG_PAL_I_FM,
.tuner_addr = ADDR_UNSET,
@@ -1514,7 +1470,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x01, 0x00, 0x03, 0x03 },
.gpiomute = 0x09,
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1540,7 +1495,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -1567,7 +1521,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 1, 1),
.gpiomux = { 0, 1, 2, 2 },
.gpiomute = 4,
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -1597,7 +1550,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -1619,7 +1571,6 @@ struct tvcard bttv_tvcards[] = {
* btwincap uses 0x80000/0x80003
*/
.gpiomute = 4,
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1655,7 +1606,6 @@ struct tvcard bttv_tvcards[] = {
/* .audio_inputs= 1, */
.svhs = 2,
.muxsel = MUXSEL(2, 0, 1, 1),
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -1875,7 +1825,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 3},
.gpiomute = 4,
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -1902,7 +1851,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -1920,7 +1868,6 @@ struct tvcard bttv_tvcards[] = {
/* Tuner, Radio, external, internal, off, on */
.gpiomux = { 0x08, 0x0f, 0x0a, 0x08 },
.gpiomute = 0x0f,
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_NTSC,
@@ -1936,7 +1883,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = 2,
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1, 1),
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -2034,7 +1980,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -2049,7 +1994,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2062,7 +2006,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2079,7 +2022,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 0),
.muxsel_hook = phytec_muxsel,
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2094,7 +2036,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 1),
.muxsel_hook = phytec_muxsel,
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2118,7 +2059,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.svhs = NO_SVHS, /* card has no svhs */
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.no_tda7432 = 1,
.gpiomask = 0x00,
@@ -2168,7 +2108,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 3,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 1, 1, 1, 1 },
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.pll = PLL_35,
@@ -2210,7 +2149,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.no_msp34xx = 1,
.no_tda7432 = 1,
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -2222,7 +2160,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.svhs = 2,
- .needs_tvaudio = 0,
.gpiomask = 0x68,
.muxsel = MUXSEL(2, 3, 1),
.gpiomux = { 0x68, 0x68, 0x61, 0x61 },
@@ -2241,7 +2178,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 2 },
.gpiomute = 3,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -2265,7 +2201,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
.pll = PLL_28,
- .needs_tvaudio = 0,
.muxsel_hook = picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2358,7 +2293,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -2405,7 +2339,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_addr = ADDR_UNSET,
.gpiomask = 0x008007,
.gpiomux = { 0, 0x000001,0,0 },
- .needs_tvaudio = 1,
.has_radio = 1,
},
[BTTV_BOARD_TIBET_CS16] = {
@@ -2518,7 +2451,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x001e00, 0, 0x018000, 0x014000 },
.gpiomute = 0x002000,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_YMEC_TVF66T5_B_DFF,
.tuner_addr = 0xc1 >>1,
@@ -2534,7 +2466,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 2 },
.gpiomute = 3,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TENA_9533_DI,
.tuner_addr = ADDR_UNSET,
@@ -2615,7 +2546,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 1,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
@@ -2714,7 +2644,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20001,0x10001, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
@@ -2746,7 +2675,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 2 }, /* CONTVFMi */
.gpiomute = 3, /* CONTVFMi */
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MK3 */
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -2785,7 +2713,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(0, 2, 3, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2799,7 +2726,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2813,7 +2739,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(3, 2, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2877,7 +2802,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -3649,7 +3573,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
struct tuner_setup tun_setup;
/* Load tuner module before issuing tuner config call! */
- if (bttv_tvcards[btv->c.type].has_radio)
+ if (btv->has_radio)
v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
&btv->c.i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
@@ -3664,7 +3588,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
tun_setup.type = btv->tuner_type;
tun_setup.addr = addr;
- if (bttv_tvcards[btv->c.type].has_radio)
+ if (btv->has_radio)
tun_setup.mode_mask |= T_RADIO;
bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
@@ -3724,6 +3648,10 @@ static void __devinit hauppauge_eeprom(struct bttv *btv)
bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
}
+
+ /* The 61334 needs the msp3410 to do the radio demod to get sound */
+ if (tv.model == 61334)
+ btv->radio_uses_msp_demodulator = 1;
}
static int terratec_active_radio_upgrade(struct bttv *btv)
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index a9cfb0f..ff7a589 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1218,6 +1218,11 @@ audio_mux(struct bttv *btv, int input, int mute)
For now this is sufficient. */
switch (input) {
case TVAUDIO_INPUT_RADIO:
+ /* Some boards need the msp to do the radio demod */
+ if (btv->radio_uses_msp_demodulator) {
+ in = MSP_INPUT_DEFAULT;
+ break;
+ }
in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
break;
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index c517161..acfe2f3 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -236,7 +236,6 @@ struct tvcard {
/* i2c audio flags */
unsigned int no_msp34xx:1;
unsigned int no_tda7432:1;
- unsigned int needs_tvaudio:1;
unsigned int msp34xx_alt:1;
/* Note: currently no card definition needs to mark the presence
of a RDS saa6588 chip. If this is ever needed, then add a new
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index db943a8d..70fd4f23 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -440,6 +440,7 @@ struct bttv {
/* radio data/state */
int has_radio;
int radio_user;
+ int radio_uses_msp_demodulator;
/* miro/pinnacle + Aimslab VHX
philips matchbox (tea5757 radio tuner) support */
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 2520219..5b75a64 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -607,8 +607,9 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
}
o = i * pixels_per_line + pixels_read + k;
if (o < len) {
+ u8 ch = invert - buffer[k];
got++;
- put_user((invert - buffer[k]) << shift, buf + o);
+ put_user(ch << shift, buf + o);
}
}
pixels_read += bytes;
@@ -648,8 +649,8 @@ static int qcam_querycap(struct file *file, void *priv,
struct qcam *qcam = video_drvdata(file);
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
- strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
- strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+ strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
+ strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
@@ -688,8 +689,8 @@ static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
pix->height = qcam->height / qcam->transfer_scale;
pix->pixelformat = (qcam->bpp == 4) ? V4L2_PIX_FMT_Y4 : V4L2_PIX_FMT_Y6;
pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = qcam->width;
- pix->sizeimage = qcam->width * qcam->height;
+ pix->bytesperline = pix->width;
+ pix->sizeimage = pix->width * pix->height;
/* Just a guess */
pix->colorspace = V4L2_COLORSPACE_SRGB;
return 0;
@@ -757,7 +758,7 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
"4-Bit Monochrome", V4L2_PIX_FMT_Y4,
{ 0, 0, 0, 0 }
},
- { 0, 0, 0,
+ { 1, 0, 0,
"6-Bit Monochrome", V4L2_PIX_FMT_Y6,
{ 0, 0, 0, 0 }
},
@@ -772,6 +773,25 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
return 0;
}
+static int qcam_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ static const struct v4l2_frmsize_discrete sizes[] = {
+ { 80, 60 },
+ { 160, 120 },
+ { 320, 240 },
+ };
+
+ if (fsize->index > 2)
+ return -EINVAL;
+ if (fsize->pixel_format != V4L2_PIX_FMT_Y4 &&
+ fsize->pixel_format != V4L2_PIX_FMT_Y6)
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete = sizes[fsize->index];
+ return 0;
+}
+
static ssize_t qcam_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -795,6 +815,11 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
return len;
}
+static unsigned int qcam_poll(struct file *filp, poll_table *wait)
+{
+ return v4l2_ctrl_poll(filp, wait) | POLLIN | POLLRDNORM;
+}
+
static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct qcam *qcam =
@@ -828,7 +853,7 @@ static const struct v4l2_file_operations qcam_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = v4l2_fh_release,
- .poll = v4l2_ctrl_poll,
+ .poll = qcam_poll,
.unlocked_ioctl = video_ioctl2,
.read = qcam_read,
};
@@ -839,6 +864,7 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
.vidioc_s_input = qcam_s_input,
.vidioc_enum_input = qcam_enum_input,
.vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = qcam_enum_framesizes,
.vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
@@ -864,9 +890,9 @@ static struct qcam *qcam_init(struct parport *port)
return NULL;
v4l2_dev = &qcam->v4l2_dev;
- strlcpy(v4l2_dev->name, "bw-qcam", sizeof(v4l2_dev->name));
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "bw-qcam%d", num_cams);
- if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+ if (v4l2_device_register(port->dev, v4l2_dev) < 0) {
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
kfree(qcam);
return NULL;
@@ -886,7 +912,7 @@ static struct qcam *qcam_init(struct parport *port)
return NULL;
}
qcam->pport = port;
- qcam->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
+ qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
NULL, 0, NULL);
if (qcam->pdev == NULL) {
v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
@@ -975,6 +1001,7 @@ static int init_bwqcam(struct parport *port)
return -ENODEV;
}
qc_calibrate(qcam);
+ v4l2_ctrl_handler_setup(&qcam->hdl);
parport_release(qcam->pdev);
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index b55d57c..7e5ffd6 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -838,10 +838,10 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
}
CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%lx\n",
+ "irq: %d, latency: %d, memory: 0x%llx\n",
cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
- cx->pci_dev->irq, pci_latency, (unsigned long)cx->base_addr);
+ cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
return 0;
}
@@ -938,7 +938,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
if (retval)
goto err;
- CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
+ CX18_DEBUG_INFO("base addr: 0x%llx\n", (u64)cx->base_addr);
/* PCI Device Setup */
retval = cx18_setup_pci(cx, pci_dev, pci_id);
@@ -946,8 +946,8 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
goto free_workqueues;
/* map io memory */
- CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
+ CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
CX18_MEM_SIZE);
if (!cx->enc_mem) {
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 7a37e0e..2767c64 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -622,7 +622,7 @@ struct cx18 {
unique ID. Starts at 1, so 0 can be used as
uninitialized value in the stream->id. */
- u32 base_addr;
+ resource_size_t base_addr;
u8 card_rev;
void __iomem *enc_mem, *reg_mem;
diff --git a/drivers/media/video/cx18/cx18-firmware.c b/drivers/media/video/cx18/cx18-firmware.c
index 1b3fb50..b85c292 100644
--- a/drivers/media/video/cx18/cx18-firmware.c
+++ b/drivers/media/video/cx18/cx18-firmware.c
@@ -164,8 +164,13 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
while (offset + sizeof(seghdr) < fw->size) {
- /* TODO: byteswapping */
- memcpy(&seghdr, src + offset / 4, sizeof(seghdr));
+ const u32 *shptr = src + offset / 4;
+
+ seghdr.sync1 = le32_to_cpu(shptr[0]);
+ seghdr.sync2 = le32_to_cpu(shptr[1]);
+ seghdr.addr = le32_to_cpu(shptr[2]);
+ seghdr.size = le32_to_cpu(shptr[3]);
+
offset += sizeof(seghdr);
if (seghdr.sync1 != APU_ROM_SYNC1 ||
seghdr.sync2 != APU_ROM_SYNC2) {
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index ed81183..eabf00c 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -434,6 +434,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
u32 handle, mdl_ack_offset, mdl_ack_count;
struct cx18_mailbox *mb;
+ int i;
mb = &order->mb;
handle = mb->args[0];
@@ -447,8 +448,9 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
return -1;
}
- cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
- sizeof(struct cx18_mdl_ack) * mdl_ack_count);
+ for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
+ ((u32 *)order->mdl_ack)[i / sizeof(u32)] =
+ cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);
if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
mb_ack_irq(cx, order);
@@ -538,6 +540,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
struct cx18_mailbox *order_mb;
struct cx18_in_work_order *order;
int submit;
+ int i;
switch (rpu) {
case CPU:
@@ -562,10 +565,12 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
order_mb = &order->mb;
/* mb->cmd and mb->args[0] through mb->args[2] */
- cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
+ for (i = 0; i < 4; i++)
+ (&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);
+
/* mb->request and mb->ack. N.B. we want to read mb->ack last */
- cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
- 2 * sizeof(u32));
+ for (i = 0; i < 2; i++)
+ (&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
if (order_mb->request == order_mb->ack) {
CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index e46446a..ed7b2aa 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -471,7 +471,7 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
dprintk(1,"Loading firmware ...\n");
dataptr = (u32*)firmware->data;
for (i = 0; i < (firmware->size >> 2); i++) {
- value = *dataptr;
+ value = le32_to_cpu(*dataptr);
checksum += ~value;
memory_write(dev->core, i, value);
dataptr++;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 20a7e24..92da7c2 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -974,6 +974,7 @@ struct em28xx_board em28xx_boards[] = {
[EM2884_BOARD_CINERGY_HTC_STICK] = {
.name = "Terratec Cinergy HTC Stick",
.has_dvb = 1,
+ .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
#if 0
.tuner_type = TUNER_PHILIPS_TDA8290,
.tuner_addr = 0x41,
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index fce5f76..5e30c4f 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -527,6 +527,8 @@ static int em28xx_ir_init(struct em28xx *dev)
if (dev->board.ir_codes == NULL) {
/* No remote control support */
+ em28xx_warn("Remote control support is not available for "
+ "this card.\n");
return 0;
}
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 137166d..31721ea 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1653,7 +1653,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type buf_type)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- int ret;
+ int i, ret;
if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1678,6 +1678,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
wake_up_interruptible(&gspca_dev->wq);
/* empty the transfer queues */
+ for (i = 0; i < gspca_dev->nframes; i++)
+ gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
atomic_set(&gspca_dev->fr_q, 0);
atomic_set(&gspca_dev->fr_i, 0);
gspca_dev->fr_o = 0;
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index b5acb1e..80c81dd 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -96,7 +96,7 @@ static void setbrightness(struct gspca_dev *gspca_dev);
static void setcontrast(struct gspca_dev *gspca_dev);
static void setgain(struct gspca_dev *gspca_dev);
static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static void setagc(struct gspca_dev *gspca_dev);
static void setawb(struct gspca_dev *gspca_dev);
static void setaec(struct gspca_dev *gspca_dev);
static void setsharpness(struct gspca_dev *gspca_dev);
@@ -189,7 +189,7 @@ static const struct ctrl sd_ctrls[] = {
.step = 1,
.default_value = 1,
},
- .set = sd_setagc
+ .set_control = setagc
},
[AWB] = {
{
@@ -851,6 +851,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
int i;
for (i = 0; i < 5; i++) {
+ msleep(10);
data = ov534_reg_read(gspca_dev, OV534_REG_STATUS);
switch (data) {
@@ -1242,10 +1243,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->ctrls = sd->ctrls;
- /* the auto white balance control works only when auto gain is set */
- if (sd_ctrls[AGC].qctrl.default_value == 0)
- gspca_dev->ctrl_inac |= (1 << AWB);
-
cam->cam_mode = ov772x_mode;
cam->nmodes = ARRAY_SIZE(ov772x_mode);
@@ -1486,29 +1483,6 @@ scan_next:
} while (remaining_len > 0);
}
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->ctrls[AGC].val = val;
-
- /* the auto white balance control works only
- * when auto gain is set */
- if (val) {
- gspca_dev->ctrl_inac &= ~(1 << AWB);
- } else {
- gspca_dev->ctrl_inac |= (1 << AWB);
- if (sd->ctrls[AWB].val) {
- sd->ctrls[AWB].val = 0;
- if (gspca_dev->streaming)
- setawb(gspca_dev);
- }
- }
- if (gspca_dev->streaming)
- setagc(gspca_dev);
- return gspca_dev->usb_err;
-}
-
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index b579730..1fd41f0 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1008,6 +1008,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
int i;
for (i = 0; i < 5; i++) {
+ msleep(10);
data = reg_r(gspca_dev, OV534_REG_STATUS);
switch (data) {
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 2cb7d95..115da16 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -418,7 +418,7 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
gspca_dev->vdev.ctrl_handler = hdl;
- v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_handler_init(hdl, 5);
sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_CONTRAST, 0, 15, 1, 7);
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index ad09820..6c31e46 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -1761,7 +1761,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_SATURATION, 0, 255, 1, 127);
sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_HUE, -180, 180, 1, 0);
- v4l2_ctrl_cluster(4, &sd->brightness);
sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_GAMMA, 0, 255, 1, 0x10);
@@ -1770,7 +1769,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0x28);
sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_RED_BALANCE, 0, 127, 1, 0x28);
- v4l2_ctrl_cluster(2, &sd->blue);
if (sd->sensor != SENSOR_OV9655 && sd->sensor != SENSOR_SOI968 &&
sd->sensor != SENSOR_OV7670 && sd->sensor != SENSOR_MT9M001 &&
@@ -1779,7 +1777,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_HFLIP, 0, 1, 1, 0);
sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_cluster(2, &sd->hflip);
}
if (sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_MT9VPRB &&
@@ -1794,6 +1791,20 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_GAIN, 0, 28, 1, 0);
sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ }
+
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+
+ v4l2_ctrl_cluster(4, &sd->brightness);
+ v4l2_ctrl_cluster(2, &sd->blue);
+ if (sd->hflip)
+ v4l2_ctrl_cluster(2, &sd->hflip);
+ if (sd->autogain) {
if (sd->sensor == SENSOR_SOI968)
/* this sensor doesn't have the exposure control and
autogain is clustered with gain instead. This works
@@ -1803,13 +1814,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
/* Otherwise autogain is clustered with exposure. */
v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false);
}
-
- sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
- V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
- if (hdl->error) {
- pr_err("Could not initialize controls\n");
- return hdl->error;
- }
return 0;
}
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 4d1696d..f38faa9 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -3120,7 +3120,7 @@ static const struct sd_desc sd_desc = {
| (SENSOR_ ## sensor << 8) \
| (flags)
static const struct usb_device_id device_table[] = {
- {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
+ {USB_DEVICE(0x0458, 0x7025), BSF(SN9C120, MI0360B, F_PDN_INV)},
{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 057929e..5462ce2 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -866,10 +866,10 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
pci_write_config_dword(pdev, 0x40, 0xffff);
IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%lx\n",
+ "irq: %d, latency: %d, memory: 0x%llx\n",
pdev->device, pdev->revision, pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pdev->irq, pci_latency, (unsigned long)itv->base_addr);
+ pdev->irq, pci_latency, (u64)itv->base_addr);
return 0;
}
@@ -1007,7 +1007,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
itv->cxhdl.priv = itv;
itv->cxhdl.func = ivtv_api_func;
- IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
+ IVTV_DEBUG_INFO("base addr: 0x%llx\n", (u64)itv->base_addr);
/* PCI Device Setup */
retval = ivtv_setup_pci(itv, pdev, pci_id);
@@ -1017,8 +1017,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
goto free_mem;
/* map io memory */
- IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
+ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
IVTV_ENCODER_SIZE);
if (!itv->enc_mem) {
@@ -1034,8 +1034,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
}
if (itv->has_cx23415) {
- IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
+ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
if (!itv->dec_mem) {
@@ -1056,8 +1056,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
}
/* map registers memory */
- IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
itv->reg_mem =
ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (!itv->reg_mem) {
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 2e22002..a7e00f8 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -622,7 +622,7 @@ struct ivtv {
struct v4l2_subdev *sd_video; /* controlling video decoder subdev */
struct v4l2_subdev *sd_audio; /* controlling audio subdev */
struct v4l2_subdev *sd_muxer; /* controlling audio muxer subdev */
- u32 base_addr; /* PCI resource base address */
+ resource_size_t base_addr; /* PCI resource base address */
volatile void __iomem *enc_mem; /* pointer to mapped encoder memory */
volatile void __iomem *dec_mem; /* pointer to mapped decoder memory */
volatile void __iomem *reg_mem; /* pointer to mapped registers */
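The ivtv hunks above widen base_addr to resource_size_t, whose size depends on the kernel configuration, so every debug print casts it to u64 and uses 0x%llx to keep the format and the argument in step on both 32- and 64-bit resource builds. A small userspace sketch of the same idiom, with unsigned long standing in for resource_size_t and a hypothetical address:

#include <stdio.h>

int main(void)
{
        /* stands in for resource_size_t, which may be 32 or 64 bits wide */
        unsigned long base_addr = 0xfe000000UL;

        /* cast to a fixed-width type so the format specifier always matches */
        printf("base addr: 0x%llx\n", (unsigned long long)base_addr);
        return 0;
}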
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index d2dec58..3945556 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -110,22 +110,6 @@ enum {
V4L2_M2M_DST = 1,
};
-/* Source and destination queue data */
-static struct m2mtest_q_data q_data[2];
-
-static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
-{
- switch (type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return &q_data[V4L2_M2M_SRC];
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return &q_data[V4L2_M2M_DST];
- default:
- BUG();
- }
- return NULL;
-}
-
#define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE
#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1)
@@ -198,8 +182,26 @@ struct m2mtest_ctx {
int aborting;
struct v4l2_m2m_ctx *m2m_ctx;
+
+ /* Source and destination queue data */
+ struct m2mtest_q_data q_data[2];
};
+static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+
static struct v4l2_queryctrl *get_ctrl(int id)
{
int i;
@@ -223,7 +225,7 @@ static int device_process(struct m2mtest_ctx *ctx,
int tile_w, bytes_left;
int width, height, bytesperline;
- q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
width = q_data->width;
height = q_data->height;
@@ -436,7 +438,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
if (!vq)
return -EINVAL;
- q_data = get_q_data(f->type);
+ q_data = get_q_data(ctx, f->type);
f->fmt.pix.width = q_data->width;
f->fmt.pix.height = q_data->height;
@@ -535,7 +537,7 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
if (!vq)
return -EINVAL;
- q_data = get_q_data(f->type);
+ q_data = get_q_data(ctx, f->type);
if (!q_data)
return -EINVAL;
@@ -747,7 +749,7 @@ static int m2mtest_queue_setup(struct vb2_queue *vq,
struct m2mtest_q_data *q_data;
unsigned int size, count = *nbuffers;
- q_data = get_q_data(vq->type);
+ q_data = get_q_data(ctx, vq->type);
size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
@@ -775,7 +777,7 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
- q_data = get_q_data(vb->vb2_queue->type);
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
@@ -860,6 +862,9 @@ static int m2mtest_open(struct file *file)
ctx->transtime = MEM2MEM_DEF_TRANSTIME;
ctx->num_processed = 0;
+ ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+ ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
@@ -986,9 +991,6 @@ static int m2mtest_probe(struct platform_device *pdev)
goto err_m2m;
}
- q_data[V4L2_M2M_SRC].fmt = &formats[0];
- q_data[V4L2_M2M_DST].fmt = &formats[0];
-
return 0;
v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index ded26b7..41f9a25 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -345,19 +345,6 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
PRP_INTR_CH2OVF,
}
},
- {
- .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8,
- .out_fmt = V4L2_PIX_FMT_YUV420,
- .cfg = {
- .channel = 2,
- .in_fmt = PRP_CNTL_DATA_IN_YUV422,
- .out_fmt = PRP_CNTL_CH2_OUT_YUV420,
- .src_pixel = 0x22000888, /* YUV422 (YUYV) */
- .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
- PRP_INTR_CH2FC | PRP_INTR_LBOVF |
- PRP_INTR_CH2OVF,
- }
- },
};
static struct mx2_fmt_cfg *mx27_emma_prp_get_format(
@@ -984,7 +971,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- const struct soc_camera_format_xlate *xlate;
unsigned long common_flags;
int ret;
int bytesperline;
@@ -1029,31 +1015,14 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
return ret;
}
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
- if (!xlate) {
- dev_warn(icd->parent, "Format %x not found\n", pixfmt);
- return -EINVAL;
- }
-
- if (xlate->code == V4L2_MBUS_FMT_YUYV8_2X8) {
- csicr1 |= CSICR1_PACK_DIR;
- csicr1 &= ~CSICR1_SWAP16_EN;
- dev_dbg(icd->parent, "already yuyv format, don't convert\n");
- } else if (xlate->code == V4L2_MBUS_FMT_UYVY8_2X8) {
- csicr1 &= ~CSICR1_PACK_DIR;
- csicr1 |= CSICR1_SWAP16_EN;
- dev_dbg(icd->parent, "convert uyvy mbus format into yuyv\n");
- } else {
- dev_warn(icd->parent, "mbus format not supported\n");
- return -EINVAL;
- }
-
if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
csicr1 |= CSICR1_REDGE;
if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_SOF_POL;
if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_HSYNC_POL;
+ if (pcdev->platform_flags & MX2_CAMERA_SWAP16)
+ csicr1 |= CSICR1_SWAP16_EN;
if (pcdev->platform_flags & MX2_CAMERA_EXT_VSYNC)
csicr1 |= CSICR1_EXT_VSYNC;
if (pcdev->platform_flags & MX2_CAMERA_CCIR)
@@ -1064,6 +1033,8 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
csicr1 |= CSICR1_GCLK_MODE;
if (pcdev->platform_flags & MX2_CAMERA_INV_DATA)
csicr1 |= CSICR1_INV_DATA;
+ if (pcdev->platform_flags & MX2_CAMERA_PACK_DIR_MSB)
+ csicr1 |= CSICR1_PACK_DIR;
pcdev->csicr1 = csicr1;
@@ -1138,8 +1109,7 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
return 0;
}
- if (code == V4L2_MBUS_FMT_YUYV8_2X8 ||
- code == V4L2_MBUS_FMT_UYVY8_2X8) {
+ if (code == V4L2_MBUS_FMT_YUYV8_2X8) {
formats++;
if (xlate) {
/*
@@ -1155,18 +1125,6 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
}
}
- if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
- formats++;
- if (xlate) {
- xlate->host_fmt =
- soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
- xlate->code = code;
- dev_dbg(dev, "Providing host format %s for sensor code %d\n",
- xlate->host_fmt->name, code);
- xlate++;
- }
- }
-
/* Generic pass-trough */
formats++;
if (xlate) {
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index af2d908..c370c2d 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -29,6 +29,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/isa.h>
#include <asm/io.h>
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index fedcd56..92fc5a2 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -615,7 +615,7 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
if (!handler->error) {
- v4l2_ctrl_cluster(3, &ctrls->colorfx);
+ v4l2_ctrl_cluster(2, &ctrls->colorfx);
ctrls->ready = true;
}
diff --git a/drivers/media/video/s5p-mfc/regs-mfc.h b/drivers/media/video/s5p-mfc/regs-mfc.h
index 053a8a8..a19bece 100644
--- a/drivers/media/video/s5p-mfc/regs-mfc.h
+++ b/drivers/media/video/s5p-mfc/regs-mfc.h
@@ -164,10 +164,15 @@
decoded pic */
#define S5P_FIMV_SI_DISPLAY_Y_ADR 0x2010 /* luma addr of displayed pic */
#define S5P_FIMV_SI_DISPLAY_C_ADR 0x2014 /* chroma addrof displayed pic */
+
#define S5P_FIMV_SI_CONSUMED_BYTES 0x2018 /* Consumed number of bytes to
decode a frame */
#define S5P_FIMV_SI_DISPLAY_STATUS 0x201c /* status of decoded picture */
+#define S5P_FIMV_SI_DECODE_Y_ADR 0x2024 /* luma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_C_ADR 0x2028 /* chroma addrof decoded pic */
+#define S5P_FIMV_SI_DECODE_STATUS 0x202c /* status of decoded picture */
+
#define S5P_FIMV_SI_CH0_SB_ST_ADR 0x2044 /* start addr of stream buf */
#define S5P_FIMV_SI_CH0_SB_FRM_SIZE 0x2048 /* size of stream buf */
#define S5P_FIMV_SI_CH0_DESC_ADR 0x204c /* addr of descriptor buf */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index c25ec02..4dd32fc 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -627,13 +627,13 @@ static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
- ctx->loop_filter_mpeg4 = ctrl->val;
+ ctx->display_delay = ctrl->val;
break;
case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
ctx->display_delay_enable = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
- ctx->display_delay = ctrl->val;
+ ctx->loop_filter_mpeg4 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
ctx->slice_interface = ctrl->val;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index acedb20..03d8334 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -243,12 +243,6 @@ static struct mfc_control controls[] = {
.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
- .menu_skip_mask = ~(
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
- ),
},
{
.id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
@@ -494,7 +488,7 @@ static struct mfc_control controls[] = {
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
.maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
- .default_value = 0,
+ .default_value = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
.menu_skip_mask = 0,
},
{
@@ -534,7 +528,7 @@ static struct mfc_control controls[] = {
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
.maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
- .default_value = 0,
+ .default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
.menu_skip_mask = 0,
},
{
@@ -907,6 +901,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
mfc_err("failed to try output format\n");
return -EINVAL;
}
+ v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+ &pix_fmt_mp->height, 4, 1080, 1, 0);
} else {
mfc_err("invalid buf type\n");
return -EINVAL;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
index db83836..5932d1c 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
@@ -57,10 +57,12 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
S5P_FIMV_SI_DISPLAY_Y_ADR) << \
MFC_OFFSET_SHIFT)
#define s5p_mfc_get_dec_y_adr() (readl(dev->regs_base + \
- S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+ S5P_FIMV_SI_DECODE_Y_ADR) << \
MFC_OFFSET_SHIFT)
#define s5p_mfc_get_dspl_status() readl(dev->regs_base + \
S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_dec_status() readl(dev->regs_base + \
+ S5P_FIMV_SI_DECODE_STATUS)
#define s5p_mfc_get_frame_type() (readl(dev->regs_base + \
S5P_FIMV_DECODE_FRAME_TYPE) \
& S5P_FIMV_DECODE_FRAME_MASK)
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
index 764eac6..cf962a4 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
@@ -13,8 +13,7 @@
#ifndef S5P_MFC_SHM_H_
#define S5P_MFC_SHM_H_
-enum MFC_SHM_OFS
-{
+enum MFC_SHM_OFS {
EXTENEDED_DECODE_STATUS = 0x00, /* D */
SET_FRAME_TAG = 0x04, /* D */
GET_FRAME_TAG_TOP = 0x08, /* D */
diff --git a/drivers/media/video/smiapp/Kconfig b/drivers/media/video/smiapp/Kconfig
index f7b35ff..fb99ff1 100644
--- a/drivers/media/video/smiapp/Kconfig
+++ b/drivers/media/video/smiapp/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_SMIAPP
tristate "SMIA++/SMIA sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
select VIDEO_SMIAPP_PLL
---help---
This is a generic driver for SMIA++/SMIA camera modules.
diff --git a/drivers/media/video/smiapp/smiapp-core.c b/drivers/media/video/smiapp/smiapp-core.c
index f518026..e8c93c8 100644
--- a/drivers/media/video/smiapp/smiapp-core.c
+++ b/drivers/media/video/smiapp/smiapp-core.c
@@ -32,6 +32,7 @@
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 3e050e1..1ad5ab6 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -1178,7 +1178,7 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
return 0;
if (vt->type == t->mode && analog_ops->get_afc)
vt->afc = analog_ops->get_afc(&t->fe);
- if (t->mode != V4L2_TUNER_RADIO) {
+ if (vt->type != V4L2_TUNER_RADIO) {
vt->capability |= V4L2_TUNER_CAP_NORM;
vt->rangelow = tv_range[0] * 16;
vt->rangehigh = tv_range[1] * 16;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 5ccbd46..83dbb2d 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -656,7 +656,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
- if (ops->vidioc_g_parm || vdev->current_norm)
+ if (ops->vidioc_g_parm || vdev->vfl_type == VFL_TYPE_GRABBER)
set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
@@ -679,6 +679,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
/* yes, really vidioc_subscribe_event */
SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 91be4e8..d7fa896 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1680,6 +1680,7 @@ static long __video_do_ioctl(struct file *file,
break;
ret = 0;
+ p->parm.capture.readbuffers = 2;
if (ops->vidioc_g_std)
ret = ops->vidioc_g_std(file, fh, &std);
if (ret == 0)
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 4d7391e..aae1720 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -2561,7 +2561,7 @@ static int vino_acquire_input(struct vino_channel_settings *vcs)
} else if (vino_drvdata->decoder
&& (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
int input;
- int data_norm;
+ int data_norm = 0;
v4l2_std_id norm;
input = VINO_INPUT_COMPOSITE;
@@ -2651,7 +2651,7 @@ static int vino_set_input(struct vino_channel_settings *vcs, int input)
}
if (vino_drvdata->decoder_owner == vcs->channel) {
- int data_norm;
+ int data_norm = 0;
v4l2_std_id norm;
ret = decoder_call(video, s_routing,
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 0960d7f..08c1024 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1149,10 +1149,14 @@ static ssize_t
vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
struct vivi_dev *dev = video_drvdata(file);
+ int err;
dprintk(dev, 1, "read called\n");
- return vb2_read(&dev->vb_vidq, data, count, ppos,
+ mutex_lock(&dev->mutex);
+ err = vb2_read(&dev->vb_vidq, data, count, ppos,
file->f_flags & O_NONBLOCK);
+ mutex_unlock(&dev->mutex);
+ return err;
}
static unsigned int
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 373f423..947a06a 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
*
* License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
*/
#include <linux/i2c.h>
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index afd4590..9edfe86 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,7 +4,7 @@
* Copyright (C) ST Microelectronics SA 2011
*
* License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
*/
#include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 93936f1..23f5463 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -835,7 +835,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
struct mei_cl *cl,
struct mei_io_list *cmpl_list)
{
- if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
+ if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_flow_control))) {
/* return the cancel routine */
list_del(&cb_pos->cb_list);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index c703332..7de1389 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -982,7 +982,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
err = request_threaded_irq(pdev->irq,
NULL,
mei_interrupt_thread_handler,
- 0, mei_driver_name, dev);
+ IRQF_ONESHOT, mei_driver_name, dev);
else
err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
@@ -992,7 +992,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
pdev->irq);
- goto unmap_memory;
+ goto disable_msi;
}
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
if (mei_hw_init(dev)) {
@@ -1023,8 +1023,8 @@ release_irq:
mei_disable_interrupts(dev);
flush_scheduled_work();
free_irq(pdev->irq, dev);
+disable_msi:
pci_disable_msi(pdev);
-unmap_memory:
pci_iounmap(pdev, dev->mem_addr);
free_device:
kfree(dev);
@@ -1101,6 +1101,8 @@ static void __devexit mei_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
@@ -1216,7 +1218,6 @@ module_init(mei_init_module);
*/
static void __exit mei_exit_module(void)
{
- misc_deregister(&mei_misc_device);
pci_unregister_driver(&mei_driver);
pr_debug("unloaded successfully.\n");
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 6be5605..e2ec050 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -341,7 +341,7 @@ static const struct watchdog_ops wd_ops = {
};
static const struct watchdog_info wd_info = {
.identity = INTEL_AMT_WATCHDOG_ID,
- .options = WDIOF_KEEPALIVEPING,
+ .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
};
static struct watchdog_device amt_wd_dev = {
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dd2d374..276d21c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -554,7 +554,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
- unsigned int timeout_us;
struct scatterlist sg;
@@ -574,23 +573,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- data.timeout_ns = card->csd.tacc_ns * 100;
- data.timeout_clks = card->csd.tacc_clks * 100;
-
- timeout_us = data.timeout_ns / 1000;
- timeout_us += data.timeout_clks * 1000 /
- (card->host->ios.clock / 1000);
-
- if (timeout_us > 100000) {
- data.timeout_ns = 100000000;
- data.timeout_clks = 0;
- }
-
data.blksz = 4;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
mrq.cmd = &cmd;
mrq.data = &data;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2d4a4b7..258b203 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1326,7 +1326,7 @@ static int mmc_suspend(struct mmc_host *host)
if (!err)
mmc_card_set_sleep(host->card);
} else if (!mmc_host_is_spi(host))
- mmc_deselect_cards(host);
+ err = mmc_deselect_cards(host);
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
mmc_release_host(host);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c272c686..b2b43f6 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1075,16 +1075,18 @@ static void mmc_sd_detect(struct mmc_host *host)
*/
static int mmc_sd_suspend(struct mmc_host *host)
{
+ int err = 0;
+
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
if (!mmc_host_is_spi(host))
- mmc_deselect_cards(host);
+ err = mmc_deselect_cards(host);
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_release_host(host);
- return 0;
+ return err;
}
/*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 13d0e95..41c5fd8 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -218,6 +218,12 @@ static int sdio_enable_wide(struct mmc_card *card)
if (ret)
return ret;
+ if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
+ pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+ mmc_hostname(card->host), ctrl);
+
+ /* set as 4-bit bus width */
+ ctrl &= ~SDIO_BUS_WIDTH_MASK;
ctrl |= SDIO_BUS_WIDTH_4BIT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 787aba1..ab56f7d 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -140,4 +140,18 @@
#define atmci_writel(port,reg,value) \
__raw_writel((value), (port)->regs + reg)
+/*
+ * Fix sconfig's burst size according to atmel MCI. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * This can be done by finding most significant bit set.
+ */
+static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
+{
+ if (maxburst > 1)
+ return fls(maxburst) - 2;
+ else
+ return 0;
+}
+
#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
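The new atmci_convert_chksize() helper encodes the DMA maximum burst for the ATMCI_DMA register, mapping 1, 4, 8 and 16 to the CHKSIZE values 0, 1, 2 and 3; for bursts larger than one this is fls(maxburst) - 2. A standalone sketch that checks the mapping, using a loop-based stand-in for the kernel's fls():

#include <stdio.h>

/* highest set bit, 1-based; stand-in for the kernel's fls() */
static int fls_sketch(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

static unsigned int convert_chksize(unsigned int maxburst)
{
        return maxburst > 1 ? fls_sketch(maxburst) - 2 : 0;
}

int main(void)
{
        const unsigned int bursts[] = { 1, 4, 8, 16 };
        unsigned int i;

        for (i = 0; i < 4; i++)
                printf("burst %2u -> CHKSIZE %u\n",
                       bursts[i], convert_chksize(bursts[i]));
        return 0;
}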
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 420aca6..f2c115e 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -910,6 +910,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
enum dma_data_direction direction;
enum dma_transfer_direction slave_dirn;
unsigned int sglen;
+ u32 maxburst;
u32 iflags;
data->error = -EINPROGRESS;
@@ -943,17 +944,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (!chan)
return -ENODEV;
- if (host->caps.has_dma)
- atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
-
if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
+ maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
} else {
direction = DMA_TO_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
+ maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
}
+ atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
+
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
@@ -2314,6 +2316,8 @@ static int __init atmci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
+ setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
/* We need at least one slot to succeed */
nr_slots = 0;
ret = -ENODEV;
@@ -2352,8 +2356,6 @@ static int __init atmci_probe(struct platform_device *pdev)
}
}
- setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
-
dev_info(&pdev->dev,
"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
host->mapbase, irq, nr_slots);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 9bbf45f..1ca5e72 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -418,6 +418,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
p->des3 = host->sg_dma;
p->des0 = IDMAC_DES0_ER;
+ mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
+
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
SDMMC_IDMAC_INT_TI);
@@ -615,14 +617,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
u32 div;
if (slot->clock != host->current_speed) {
- if (host->bus_hz % slot->clock)
+ div = host->bus_hz / slot->clock;
+ if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
/*
* move the + 1 after the divide to prevent
* over-clocking the card.
*/
- div = ((host->bus_hz / slot->clock) >> 1) + 1;
- else
- div = (host->bus_hz / slot->clock) >> 1;
+ div += 1;
+
+ div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
@@ -939,8 +942,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
mdelay(20);
if (cmd->data) {
- host->data = NULL;
dw_mci_stop_dma(host);
+ host->data = NULL;
}
}
}
@@ -1623,7 +1626,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
- set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
host->dma_ops->complete(host);
}
#endif
@@ -1725,7 +1727,8 @@ static void dw_mci_work_routine_card(struct work_struct *work)
#ifdef CONFIG_MMC_DW_IDMAC
ctrl = mci_readl(host, BMOD);
- ctrl |= 0x01; /* Software reset of DMA */
+ /* Software reset of DMA */
+ ctrl |= SDMMC_IDMAC_SWRESET;
mci_writel(host, BMOD, ctrl);
#endif
@@ -1950,10 +1953,6 @@ int dw_mci_probe(struct dw_mci *host)
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
-
- host->dma_ops = host->pdata->dma_ops;
- dw_mci_init_dma(host);
-
/*
* Get the host data width - this assumes that HCON has been set with
* the correct values.
@@ -1981,10 +1980,11 @@ int dw_mci_probe(struct dw_mci *host)
}
/* Reset all blocks */
- if (!mci_wait_reset(&host->dev, host)) {
- ret = -ENODEV;
- goto err_dmaunmap;
- }
+ if (!mci_wait_reset(&host->dev, host))
+ return -ENODEV;
+
+ host->dma_ops = host->pdata->dma_ops;
+ dw_mci_init_dma(host);
/* Clear the interrupts for the host controller */
mci_writel(host, RINTSTS, 0xFFFFFFFF);
@@ -2170,14 +2170,14 @@ int dw_mci_resume(struct dw_mci *host)
if (host->vmmc)
regulator_enable(host->vmmc);
- if (host->dma_ops->init)
- host->dma_ops->init(host);
-
if (!mci_wait_reset(&host->dev, host)) {
ret = -ENODEV;
return ret;
}
+ if (host->dma_ops->init)
+ host->dma_ops->init(host);
+
/* Restore the old value at FIFOTH register */
mci_writel(host, FIFOTH, host->fifoth_val);
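In the dw_mmc divider rework above, the quotient is rounded up before it is halved so the card clock never ends up faster than what the slot asked for, and a divider of zero (bypass) is used only when the bus clock already matches the request. A small sketch with hypothetical clock values showing the effect; bus_hz and the requested rates are assumptions, and div == 0 is treated as "card clock = bus clock", otherwise card clock = bus_hz / (2 * div):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned int setup_div(unsigned int bus_hz, unsigned int want_hz)
{
        unsigned int div = bus_hz / want_hz;

        if (bus_hz % want_hz && bus_hz > want_hz)
                div += 1;               /* round up before halving */
        return (bus_hz != want_hz) ? DIV_ROUND_UP(div, 2) : 0;
}

int main(void)
{
        const unsigned int bus_hz = 50000000;   /* assumed controller clock */
        const unsigned int wants[] = { 400000, 25000000, 50000000 };
        unsigned int i;

        for (i = 0; i < 3; i++) {
                unsigned int div = setup_div(bus_hz, wants[i]);
                unsigned int got = div ? bus_hz / (2 * div) : bus_hz;

                printf("req %8u Hz -> div %3u, actual %8u Hz\n",
                       wants[i], div, got);
        }
        return 0;
}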
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f0fcce4..50ff19a 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1216,12 +1216,7 @@ static void mmci_dt_populate_generic_pdata(struct device_node *np,
int bus_width = 0;
pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
- if (!pdata->gpio_wp)
- pdata->gpio_wp = -1;
-
pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
- if (!pdata->gpio_cd)
- pdata->gpio_cd = -1;
if (of_get_property(np, "cd-inverted", NULL))
pdata->cd_invert = true;
@@ -1276,6 +1271,12 @@ static int __devinit mmci_probe(struct amba_device *dev,
return -EINVAL;
}
+ if (!plat) {
+ plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+ }
+
if (np)
mmci_dt_populate_generic_pdata(np, plat);
@@ -1424,6 +1425,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
writel(0, host->base + MMCIMASK1);
writel(0xfff, host->base + MMCICLEAR);
+ if (plat->gpio_cd == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_gpio_cd;
+ }
if (gpio_is_valid(plat->gpio_cd)) {
ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
if (ret == 0)
@@ -1447,6 +1452,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
if (ret >= 0)
host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
}
+ if (plat->gpio_wp == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_gpio_wp;
+ }
if (gpio_is_valid(plat->gpio_wp)) {
ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
if (ret == 0)
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 34a9026..277161d 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -894,8 +894,8 @@ static struct platform_driver mxs_mmc_driver = {
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &mxs_mmc_pm_ops,
- .of_match_table = mxs_mmc_dt_ids,
#endif
+ .of_match_table = mxs_mmc_dt_ids,
},
};
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 552196c..3e8dcf8 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1300,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = {
.set_ios = mmc_omap_set_ios,
};
-static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
{
struct mmc_omap_slot *slot = NULL;
struct mmc_host *mmc;
@@ -1485,24 +1485,26 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
}
host->nr_slots = pdata->nr_slots;
+ host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
+ host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+ if (!host->mmc_omap_wq)
+ goto err_plat_cleanup;
+
for (i = 0; i < pdata->nr_slots; i++) {
ret = mmc_omap_new_slot(host, i);
if (ret < 0) {
while (--i >= 0)
mmc_omap_remove_slot(host->slots[i]);
- goto err_plat_cleanup;
+ goto err_destroy_wq;
}
}
- host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
-
- host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
- if (!host->mmc_omap_wq)
- goto err_plat_cleanup;
-
return 0;
+err_destroy_wq:
+ destroy_workqueue(host->mmc_omap_wq);
err_plat_cleanup:
if (pdata->cleanup)
pdata->cleanup(&pdev->dev);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 9a7a60a..389a3ee 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -85,7 +85,6 @@
#define BRR_ENABLE (1 << 5)
#define DTO_ENABLE (1 << 20)
#define INIT_STREAM (1 << 1)
-#define ACEN_ACMD12 (1 << 2)
#define DP_SELECT (1 << 21)
#define DDIR (1 << 4)
#define DMA_EN 0x1
@@ -117,7 +116,6 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
-#define AUTO_CMD12 (1 << 0) /* Auto CMD12 support */
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -177,7 +175,6 @@ struct omap_hsmmc_host {
int reqs_blocked;
int use_reg;
int req_in_progress;
- unsigned int flags;
struct omap_hsmmc_next next_data;
struct omap_mmc_platform_data *pdata;
@@ -773,8 +770,6 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
cmdtype = 0x3;
cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
- if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
- cmdreg |= ACEN_ACMD12;
if (data) {
cmdreg |= DP_SELECT | MSBS | BCE;
@@ -847,14 +842,11 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
else
data->bytes_xfered = 0;
- if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
- omap_hsmmc_start_command(host, data->stop, NULL);
- } else {
- if (data->stop)
- data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
- RSP76);
+ if (!data->stop) {
omap_hsmmc_request_done(host, data->mrq);
+ return;
}
+ omap_hsmmc_start_command(host, data->stop, NULL);
}
/*
@@ -1859,7 +1851,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
host->mapbase = res->start + pdata->reg_offset;
host->base = ioremap(host->mapbase, SZ_4K);
host->power_mode = MMC_POWER_OFF;
- host->flags = AUTO_CMD12;
host->next_data.cookie = 1;
platform_set_drvdata(pdev, host);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 55a164f..a50c205 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -404,7 +404,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
if (sc->ext_cd_irq &&
request_threaded_irq(sc->ext_cd_irq, NULL,
sdhci_s3c_gpio_card_detect_thread,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), sc) == 0) {
int status = gpio_get_value(sc->ext_cd_gpio);
if (pdata->ext_cd_gpio_invert)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 1fe32df..423da81 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
* Support of SDHCI platform devices for spear soc family
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* Inspired by sdhci-pltfm.c
*
@@ -289,5 +289,5 @@ static struct platform_driver sdhci_driver = {
module_platform_driver(sdhci_driver);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e626732..f4b8b4d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
}
if (count >= 0xF) {
- pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
- mmc_hostname(host->mmc), count, cmd->opcode);
+ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
count = 0xE;
}
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index ae36d7e..551e316 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -304,32 +304,17 @@ static void find_next_position(struct mtdoops_context *cxt)
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
- const char *s2, unsigned long l2)
+ enum kmsg_dump_reason reason)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
- unsigned long s1_start, s2_start;
- unsigned long l1_cpy, l2_cpy;
- char *dst;
-
- if (reason != KMSG_DUMP_OOPS &&
- reason != KMSG_DUMP_PANIC)
- return;
/* Only dump oopses if dump_oops is set */
if (reason == KMSG_DUMP_OOPS && !dump_oops)
return;
- dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
- l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
- l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
-
- s2_start = l2 - l2_cpy;
- s1_start = l1 - l1_cpy;
-
- memcpy(dst, s1 + s1_start, l1_cpy);
- memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+ kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+ record_size - MTDOOPS_HEADER_SIZE, NULL);
/* Panics must be written immediately */
if (reason != KMSG_DUMP_OOPS)
@@ -375,6 +360,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
return;
}
+ cxt->dump.max_reason = KMSG_DUMP_OOPS;
cxt->dump.dump = mtdoops_do_dump;
err = kmsg_dump_register(&cxt->dump);
if (err) {
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 9f957c2..7c13803 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -264,6 +264,9 @@ static struct dentry *dfs_rootdir;
*/
int ubi_debugfs_init(void)
{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
dfs_rootdir = debugfs_create_dir("ubi", NULL);
if (IS_ERR_OR_NULL(dfs_rootdir)) {
int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -281,7 +284,8 @@ int ubi_debugfs_init(void)
*/
void ubi_debugfs_exit(void)
{
- debugfs_remove(dfs_rootdir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove(dfs_rootdir);
}
/* Read an UBI debugfs file */
@@ -403,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
struct dentry *dent;
struct ubi_debug_info *d = ubi->dbg;
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
ubi->ubi_num);
if (n == UBI_DFS_DIR_LEN) {
@@ -470,5 +477,6 @@ out:
*/
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
{
- debugfs_remove_recursive(ubi->dbg->dfs_dir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ubi->dbg->dfs_dir);
}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9df100a..b6be644 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
vol_id, lnum, ubi->works_count);
- down_write(&ubi->work_sem);
while (found) {
struct ubi_work *wrk;
found = 0;
+ down_read(&ubi->work_sem);
spin_lock(&ubi->wl_lock);
list_for_each_entry(wrk, &ubi->works, list) {
if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
@@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
spin_unlock(&ubi->wl_lock);
err = wrk->func(ubi, wrk, 0);
- if (err)
- goto out;
+ if (err) {
+ up_read(&ubi->work_sem);
+ return err;
+ }
+
spin_lock(&ubi->wl_lock);
found = 1;
break;
}
}
spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
}
-out:
+ /*
+ * Make sure all the works which have been done in parallel are
+ * finished.
+ */
+ down_write(&ubi->work_sem);
up_write(&ubi->work_sem);
+
return err;
}
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index dd5e048..545c09e 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -936,7 +936,7 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct cops_local *lp = netdev_priv(dev);
struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr;
- struct atalk_addr *aa = (struct atalk_addr *)&lp->node_addr;
+ struct atalk_addr *aa = &lp->node_addr;
switch(cmd)
{
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 3463b46..a030e63 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2454,24 +2454,27 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
out:
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
return NETDEV_TX_OK;
}
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
int ret = RX_HANDLER_ANOTHER;
+ struct lacpdu *lacpdu, _lacpdu;
+
if (skb->protocol != PKT_TYPE_LACPDU)
return ret;
- if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+ lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
+ if (!lacpdu)
return ret;
read_lock(&bond->lock);
- ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
+ ret = bond_3ad_rx_indication(lacpdu, slave, skb->len);
read_unlock(&bond->lock);
return ret;
}
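bond_3ad_lacpdu_recv() above now takes a const skb and fetches the LACPDU through skb_header_pointer() with stack storage, instead of pulling the header into the linear area with pskb_may_pull(), so the shared skb is never reshaped. A userspace sketch of that header-pointer idiom; the two-part struct packet below is an assumed stand-in for struct sk_buff, not the kernel layout:

#include <string.h>
#include <stddef.h>

struct packet {
        const unsigned char *data;      /* contiguous ("linear") bytes */
        size_t linear_len;
        const unsigned char *frag;      /* remainder, stored elsewhere */
        size_t frag_len;
};

/* Return a pointer into the packet when the first len bytes are already
 * contiguous; otherwise copy them into caller-provided storage.  Either
 * way the packet itself is left untouched. */
static const void *header_pointer(const struct packet *p, size_t len,
                                  void *buf)
{
        if (len <= p->linear_len)
                return p->data;
        if (len > p->linear_len + p->frag_len)
                return NULL;            /* packet too short */
        memcpy(buf, p->data, p->linear_len);
        memcpy((unsigned char *)buf + p->linear_len, p->frag,
               len - p->linear_len);
        return buf;
}

int main(void)
{
        unsigned char head[4] = { 0x01, 0x01, 0x14, 0x00 };
        unsigned char rest[60] = { 0 };
        struct packet p = { head, sizeof(head), rest, sizeof(rest) };
        unsigned char copy[64];

        return header_pointer(&p, sizeof(copy), copy) ? 0 : 1;
}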
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5ee7e3c..0cfaa4a 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -274,8 +274,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
void bond_3ad_handle_link_change(struct slave *slave, char link);
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave);
+int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
#endif //__BOND_3AD_H__
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 0f59c15..e15cc11 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -342,27 +342,17 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
_unlock_rx_hashtbl_bh(bond);
}
-static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
- struct arp_pkt *arp;
+ struct arp_pkt *arp, _arp;
if (skb->protocol != cpu_to_be16(ETH_P_ARP))
goto out;
- arp = (struct arp_pkt *) skb->data;
- if (!arp) {
- pr_debug("Packet has no ARP data\n");
+ arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
+ if (!arp)
goto out;
- }
-
- if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
- goto out;
-
- if (skb->len < sizeof(struct arp_pkt)) {
- pr_debug("Packet is too small to be an ARP\n");
- goto out;
- }
if (arp->op_code == htons(ARPOP_REPLY)) {
/* update rx hash table for this ARP */
@@ -1356,12 +1346,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
+ read_unlock(&bond->curr_slave_lock);
+
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
- read_unlock(&bond->curr_slave_lock);
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3680aa2..2cf084e 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
#include "bonding.h"
#include "bond_alb.h"
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee8cf9..4ddcc3e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,6 +76,7 @@
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/pkt_sched.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
return next;
}
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
{
skb->dev = slave_dev;
- skb->queue_mapping = bond_queue_mapping(skb);
+ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+ sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
+ skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
if (unlikely(netpoll_tx_running(slave_dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -1444,8 +1445,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
struct sk_buff *skb = *pskb;
struct slave *slave;
struct bonding *bond;
- int (*recv_probe)(struct sk_buff *, struct bonding *,
- struct slave *);
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
int ret = RX_HANDLER_ANOTHER;
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -1462,15 +1463,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
recv_probe = ACCESS_ONCE(bond->recv_probe);
if (recv_probe) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (likely(nskb)) {
- ret = recv_probe(nskb, bond, slave);
- dev_kfree_skb(nskb);
- if (ret == RX_HANDLER_CONSUMED) {
- consume_skb(skb);
- return ret;
- }
+ ret = recv_probe(skb, bond, slave);
+ if (ret == RX_HANDLER_CONSUMED) {
+ consume_skb(skb);
+ return ret;
}
}
@@ -2737,25 +2733,31 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
}
}
-static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
- struct arphdr *arp;
+ struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr;
__be32 sip, tip;
+ int alen;
if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
return RX_HANDLER_ANOTHER;
read_lock(&bond->lock);
+ alen = arp_hdr_len(bond->dev);
pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
bond->dev->name, skb->dev->name);
- if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
- goto out_unlock;
+ if (alen > skb_headlen(skb)) {
+ arp = kmalloc(alen, GFP_ATOMIC);
+ if (!arp)
+ goto out_unlock;
+ if (skb_copy_bits(skb, 0, arp, alen) < 0)
+ goto out_unlock;
+ }
- arp = arp_hdr(skb);
if (arp->ar_hln != bond->dev->addr_len ||
skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK ||
@@ -2790,6 +2792,8 @@ static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
out_unlock:
read_unlock(&bond->lock);
+ if (arp != (struct arphdr *)skb->data)
+ kfree(arp);
return RX_HANDLER_ANOTHER;
}
@@ -3226,6 +3230,12 @@ static int bond_master_netdev_event(unsigned long event,
switch (event) {
case NETDEV_CHANGENAME:
return bond_event_changename(event_bond);
+ case NETDEV_UNREGISTER:
+ bond_remove_proc_entry(event_bond);
+ break;
+ case NETDEV_REGISTER:
+ bond_create_proc_entry(event_bond);
+ break;
default:
break;
}
@@ -3986,7 +3996,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
out:
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
return NETDEV_TX_OK;
@@ -4008,11 +4018,11 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
res = bond_dev_queue_xmit(bond, skb,
bond->curr_active_slave->dev);
+ read_unlock(&bond->curr_slave_lock);
+
if (res)
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
-
- read_unlock(&bond->curr_slave_lock);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -4051,7 +4061,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
return NETDEV_TX_OK;
@@ -4089,7 +4099,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
res = bond_dev_queue_xmit(bond, skb2, tx_dev);
if (res) {
- dev_kfree_skb(skb2);
+ kfree_skb(skb2);
continue;
}
}
@@ -4103,7 +4113,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
out:
if (res)
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
/* frame sent to all suitable interfaces */
return NETDEV_TX_OK;
@@ -4171,7 +4181,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
/*
* Save the original txq to restore before passing to the driver
*/
- bond_queue_mapping(skb) = skb->queue_mapping;
+ qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
if (unlikely(txq >= dev->real_num_tx_queues)) {
do {
@@ -4209,7 +4219,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
pr_err("%s: Error: Unknown bonding mode %d\n",
dev->name, bond->params.mode);
WARN_ON_ONCE(1);
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
}
@@ -4231,7 +4241,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bond->slave_cnt)
ret = __bond_start_xmit(skb, dev);
else
- dev_kfree_skb(skb);
+ kfree_skb(skb);
read_unlock(&bond->lock);
@@ -4410,8 +4420,6 @@ static void bond_uninit(struct net_device *bond_dev)
bond_work_cancel_all(bond);
- bond_remove_proc_entry(bond);
-
bond_debug_unregister(bond);
__hw_addr_flush(&bond->mc_list);
@@ -4813,7 +4821,6 @@ static int bond_init(struct net_device *bond_dev)
bond_set_lockdep_class(bond_dev);
- bond_create_proc_entry(bond);
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index ad284ba..3cea38d 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -150,14 +150,25 @@ static void bond_info_show_master(struct seq_file *seq)
}
}
+static const char *bond_slave_link_status(s8 link)
+{
+ static const char * const status[] = {
+ [BOND_LINK_UP] = "up",
+ [BOND_LINK_FAIL] = "going down",
+ [BOND_LINK_DOWN] = "down",
+ [BOND_LINK_BACK] = "going back",
+ };
+
+ return status[link];
+}
+
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
struct bonding *bond = seq->private;
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
- seq_printf(seq, "MII Status: %s\n",
- (slave->link == BOND_LINK_UP) ? "up" : "down");
+ seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
if (slave->speed == SPEED_UNKNOWN)
seq_printf(seq, "Speed: %s\n", "Unknown");
else
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index aef42f0..485bedb 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
}
}
- pr_info("%s: Unable to set %.*s as primary slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ strncpy(bond->params.primary, ifname, IFNAMSIZ);
+ bond->params.primary[IFNAMSIZ - 1] = 0;
+
+ pr_info("%s: Recording %s as primary, "
+ "but it has not been enslaved to %s yet.\n",
+ bond->dev->name, ifname, bond->dev->name);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4581aa5..f8af2fc 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -218,8 +218,8 @@ struct bonding {
struct slave *primary_slave;
bool force_primary;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
- int (*recv_probe)(struct sk_buff *, struct bonding *,
- struct slave *);
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
rwlock_t lock;
rwlock_t curr_slave_lock;
u8 send_peer_notif;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 1520814..0def8b3 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
@@ -20,7 +19,7 @@
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
-#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>
@@ -33,59 +32,46 @@ MODULE_DESCRIPTION("CAIF HSI driver");
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
(((pow)-((x)&((pow)-1)))))
-static int inactivity_timeout = 1000;
-module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
+static const struct cfhsi_config hsi_default_config = {
-static int aggregation_timeout = 1;
-module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
+ /* Inactivity timeout on HSI, ms */
+ .inactivity_timeout = HZ,
-/*
- * HSI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
-static int hsi_head_align = 4;
-module_param(hsi_head_align, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
+ /* Aggregation timeout (ms) of zero means no aggregation is done */
+ .aggregation_timeout = 1,
-static int hsi_tail_align = 4;
-module_param(hsi_tail_align, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
-
-/*
- * HSI link layer flowcontrol thresholds.
- * Warning: A high threshold value migth increase throughput but it will at
- * the same time prevent channel prioritization and increase the risk of
- * flooding the modem. The high threshold should be above the low.
- */
-static int hsi_high_threshold = 100;
-module_param(hsi_high_threshold, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
+ /*
+ * HSI link layer flow-control thresholds.
+ * Threshold values for the HSI packet queue. Flow-control will be
+ * asserted when the number of packets exceeds q_high_mark. It will
+ * not be de-asserted before the number of packets drops below
+ * q_low_mark.
+ * Warning: A high threshold value might increase throughput but it
+ * will at the same time prevent channel prioritization and increase
+ * the risk of flooding the modem. The high threshold should be above
+ * the low.
+ */
+ .q_high_mark = 100,
+ .q_low_mark = 50,
-static int hsi_low_threshold = 50;
-module_param(hsi_low_threshold, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_low_threshold, "HSI high threshold (FLOW ON).");
+ /*
+ * HSI padding options.
+ * Warning: must be a power of 2 (& operation used) and cannot be zero!
+ */
+ .head_align = 4,
+ .tail_align = 4,
+};
#define ON 1
#define OFF 0
-/*
- * Threshold values for the HSI packet queue. Flowcontrol will be asserted
- * when the number of packets exceeds HIGH_WATER_MARK. It will not be
- * de-asserted before the number of packets drops below LOW_WATER_MARK.
- */
-#define LOW_WATER_MARK hsi_low_threshold
-#define HIGH_WATER_MARK hsi_high_threshold
-
static LIST_HEAD(cfhsi_list);
-static spinlock_t cfhsi_list_lock;
static void cfhsi_inactivity_tout(unsigned long arg)
{
struct cfhsi *cfhsi = (struct cfhsi *)arg;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
/* Schedule power down work queue. */
@@ -101,8 +87,8 @@ static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
int hpad, tpad, len;
info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
- tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+ tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
len = skb->len + hpad + tpad;
if (direction > 0)
@@ -115,7 +101,7 @@ static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
int i;
- if (cfhsi->aggregation_timeout < 0)
+ if (cfhsi->cfg.aggregation_timeout == 0)
return true;
for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
@@ -171,7 +157,7 @@ static void cfhsi_abort_tx(struct cfhsi *cfhsi)
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
}
@@ -181,14 +167,14 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
size_t fifo_occupancy;
int ret;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
do {
- ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
&fifo_occupancy);
if (ret) {
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: can't get FIFO occupancy: %d.\n",
__func__, ret);
break;
@@ -198,11 +184,11 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
- cfhsi->dev);
+ ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
+ cfhsi->ops);
if (ret) {
clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: can't read data: %d.\n",
__func__, ret);
break;
@@ -213,13 +199,13 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
!test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
if (ret < 0) {
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: can't wait for flush complete: %d.\n",
__func__, ret);
break;
} else if (!ret) {
ret = -ETIMEDOUT;
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: timeout waiting for flush complete.\n",
__func__);
break;
@@ -246,14 +232,14 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Check if we can embed a CAIF frame. */
if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
struct caif_payload_info *info;
- int hpad = 0;
- int tpad = 0;
+ int hpad;
+ int tpad;
/* Calculate needed head alignment and tail alignment. */
info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
- tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+ tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
/* Check if frame still fits with added alignment. */
if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
@@ -282,8 +268,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
while (nfrms < CFHSI_MAX_PKTS) {
struct caif_payload_info *info;
- int hpad = 0;
- int tpad = 0;
+ int hpad;
+ int tpad;
if (!skb)
skb = cfhsi_dequeue(cfhsi);
@@ -294,8 +280,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Calculate needed head alignment and tail alignment. */
info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
- tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+ tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
/* Fill in CAIF frame length in descriptor. */
desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
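For reference, the head/tail padding computed above can be reproduced outside the driver; a minimal standalone sketch of the PAD_POW2() arithmetic (the header length and frame length below are made-up example values, and the alignment must be a power of two as the driver's comment requires):

#include <stdio.h>

/* Same macro as in caif_hsi.c: bytes needed to pad x up to a multiple of pow. */
#define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : \
			  (((pow) - ((x) & ((pow) - 1)))))

int main(void)
{
	unsigned int head_align = 4, tail_align = 4;	/* defaults from hsi_default_config */
	unsigned int hdr_len = 5, frm_len = 42;		/* arbitrary example frame */

	/* hpad includes one extra byte on top of the alignment padding, as in cfhsi_tx_frm(). */
	unsigned int hpad = 1 + PAD_POW2(hdr_len + 1, head_align);
	unsigned int tpad = PAD_POW2(frm_len + hpad, tail_align);

	printf("hpad=%u tpad=%u padded=%u\n", hpad, tpad, frm_len + hpad + tpad);
	return 0;
}

With the values above this prints hpad=3 tpad=3 padded=48, i.e. the padded frame ends on the 4-byte tail alignment.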
@@ -348,7 +334,7 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
int len, res;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -366,22 +352,22 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
/* Start inactivity timer. */
mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
break;
}
/* Set up new transfer. */
- res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
if (WARN_ON(res < 0))
- dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
__func__, res);
} while (res < 0);
}
static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
- dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -392,7 +378,7 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
*/
spin_lock_bh(&cfhsi->lock);
if (cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
+ cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
cfhsi->cfdev.flowctrl) {
cfhsi->flow_off_sent = 0;
@@ -404,19 +390,19 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
cfhsi_start_tx(cfhsi);
} else {
mod_timer(&cfhsi->aggregation_timer,
- jiffies + cfhsi->aggregation_timeout);
+ jiffies + cfhsi->cfg.aggregation_timeout);
spin_unlock_bh(&cfhsi->lock);
}
return;
}
-static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
+static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -433,7 +419,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
if ((desc->header & ~CFHSI_PIGGY_DESC) ||
(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
__func__);
return -EPROTO;
}
@@ -455,7 +441,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Sanity check length of CAIF frame. */
if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
__func__);
return -EPROTO;
}
@@ -463,7 +449,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Allocate SKB (OK even in IRQ context). */
skb = alloc_skb(len + 1, GFP_ATOMIC);
if (!skb) {
- dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
__func__);
return -ENOMEM;
}
@@ -477,8 +463,8 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb->dev = cfhsi->ndev;
/*
- * We are called from a arch specific platform device.
- * Unfortunately we don't know what context we're
+ * We are in a callback handler and
+ * unfortunately we don't know what context we're
* running in.
*/
if (in_interrupt())
@@ -504,7 +490,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
xfer_sz += CFHSI_DESC_SZ;
if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
- dev_err(&cfhsi->ndev->dev,
+ netdev_err(cfhsi->ndev,
"%s: Invalid payload len: %d, ignored.\n",
__func__, xfer_sz);
return -EPROTO;
@@ -551,7 +537,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Sanity check header and offset. */
if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
__func__);
return -EPROTO;
}
@@ -573,7 +559,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
struct sk_buff *skb;
u8 *dst = NULL;
u8 *pcffrm = NULL;
- int len = 0;
+ int len;
/* CAIF frame starts after head padding. */
pcffrm = pfrm + *pfrm + 1;
@@ -585,7 +571,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Sanity check length of CAIF frames. */
if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
__func__);
return -EPROTO;
}
@@ -593,7 +579,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Allocate SKB (OK even in IRQ context). */
skb = alloc_skb(len + 1, GFP_ATOMIC);
if (!skb) {
- dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
__func__);
cfhsi->rx_state.nfrms = nfrms;
return -ENOMEM;
@@ -608,7 +594,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb->dev = cfhsi->ndev;
/*
- * We're called from a platform device,
+ * We're called in a callback from the HSI driver
* and don't know the context we're running in.
*/
if (in_interrupt())
@@ -639,7 +625,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
desc = (struct cfhsi_desc *)cfhsi->rx_buf;
- dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -647,7 +633,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
/* Update inactivity timer if pending. */
spin_lock_bh(&cfhsi->lock);
mod_timer_pending(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
@@ -680,12 +666,11 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
if (desc_pld_len < 0)
goto out_of_sync;
- if (desc_pld_len > 0)
+ if (desc_pld_len > 0) {
rx_len = desc_pld_len;
-
- if (desc_pld_len > 0 &&
- (piggy_desc->header & CFHSI_PIGGY_DESC))
- rx_len += CFHSI_DESC_SZ;
+ if (piggy_desc->header & CFHSI_PIGGY_DESC)
+ rx_len += CFHSI_DESC_SZ;
+ }
/*
* Copy needed information from the piggy-backed
@@ -693,10 +678,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
*/
memcpy(rx_buf, (u8 *)piggy_desc,
CFHSI_DESC_SHORT_SZ);
- /* Mark no embedded frame here */
- piggy_desc->offset = 0;
- if (desc_pld_len == -EPROTO)
- goto out_of_sync;
}
}
@@ -712,13 +693,13 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
/* Initiate next read */
if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
/* Set up new transfer. */
- dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
__func__);
- res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
- cfhsi->dev);
+ res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
+ cfhsi->ops);
if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
__func__, res);
cfhsi->ndev->stats.rx_errors++;
cfhsi->ndev->stats.rx_dropped++;
@@ -737,6 +718,8 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
/* Extract any payload in piggyback descriptor. */
if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
goto out_of_sync;
+ /* Mark no embedded frame after extracting it */
+ piggy_desc->offset = 0;
}
}
@@ -753,7 +736,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
return;
out_of_sync:
- dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
+ netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
cfhsi->rx_buf, CFHSI_DESC_SZ);
schedule_work(&cfhsi->out_of_sync_work);
@@ -763,18 +746,18 @@ static void cfhsi_rx_slowpath(unsigned long arg)
{
struct cfhsi *cfhsi = (struct cfhsi *)arg;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
cfhsi_rx_done(cfhsi);
}
-static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
+static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -807,9 +790,9 @@ static void cfhsi_wake_up(struct work_struct *work)
}
/* Activate wake line. */
- cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
- dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
__func__);
/* Wait for acknowledge. */
@@ -819,33 +802,33 @@ static void cfhsi_wake_up(struct work_struct *work)
&cfhsi->bits), ret);
if (unlikely(ret < 0)) {
/* Interrupted by signal. */
- dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
__func__, ret);
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
return;
} else if (!ret) {
bool ca_wake = false;
size_t fifo_occupancy = 0;
/* Wakeup timeout */
- dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
__func__);
/* Check the FIFO to see if the modem has sent something. */
- WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
&fifo_occupancy));
- dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
__func__, (unsigned) fifo_occupancy);
/* Check if we missed the interrupt. */
- WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
&ca_wake));
if (ca_wake) {
- dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
__func__);
/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
@@ -856,11 +839,11 @@ static void cfhsi_wake_up(struct work_struct *work)
}
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
return;
}
wake_ack:
- dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
__func__);
/* Clear power up bit. */
@@ -868,11 +851,11 @@ wake_ack:
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
/* Resume read operation. */
- dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
- res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);
+ netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
+ res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
if (WARN_ON(res < 0))
- dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);
+ netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
/* Clear power up acknowledgement. */
clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -881,16 +864,16 @@ wake_ack:
/* Resume transmit if queues are not empty. */
if (!cfhsi_tx_queue_len(cfhsi)) {
- dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
__func__);
/* Start inactivity timer. */
mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
return;
}
- dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
__func__);
spin_unlock_bh(&cfhsi->lock);
@@ -900,14 +883,14 @@ wake_ack:
if (likely(len > 0)) {
/* Set up new transfer. */
- res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
__func__, res);
cfhsi_abort_tx(cfhsi);
}
} else {
- dev_err(&cfhsi->ndev->dev,
+ netdev_err(cfhsi->ndev,
"%s: Failed to create HSI frame: %d.\n",
__func__, len);
}
@@ -921,13 +904,13 @@ static void cfhsi_wake_down(struct work_struct *work)
int retry = CFHSI_WAKE_TOUT;
cfhsi = container_of(work, struct cfhsi, wake_down_work);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
/* Deactivate wake line. */
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
/* Wait for acknowledge. */
ret = CFHSI_WAKE_TOUT;
@@ -936,26 +919,26 @@ static void cfhsi_wake_down(struct work_struct *work)
&cfhsi->bits), ret);
if (ret < 0) {
/* Interrupted by signal. */
- dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
__func__, ret);
return;
} else if (!ret) {
bool ca_wake = true;
/* Timeout */
- dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
+ netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
/* Check if we missed the interrupt. */
- WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
&ca_wake));
if (!ca_wake)
- dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
__func__);
}
/* Check FIFO occupancy. */
while (retry) {
- WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
&fifo_occupancy));
if (!fifo_occupancy)
@@ -967,14 +950,13 @@ static void cfhsi_wake_down(struct work_struct *work)
}
if (!retry)
- dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);
+ netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
/* Clear AWAKE condition. */
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
/* Cancel pending RX requests. */
- cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
-
+ cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}
static void cfhsi_out_of_sync(struct work_struct *work)
@@ -988,12 +970,12 @@ static void cfhsi_out_of_sync(struct work_struct *work)
rtnl_unlock();
}
-static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
+static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi = NULL;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -1007,12 +989,12 @@ static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}
-static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
+static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi = NULL;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
/* Initiating low power is only permitted by the host (us). */
@@ -1024,7 +1006,7 @@ static void cfhsi_aggregation_tout(unsigned long arg)
{
struct cfhsi *cfhsi = (struct cfhsi *)arg;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
cfhsi_start_tx(cfhsi);
@@ -1077,7 +1059,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
/* Send flow off if number of packets is above high water mark. */
if (!cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
+ cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
cfhsi->cfdev.flowctrl) {
cfhsi->flow_off_sent = 1;
cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
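The q_high_mark/q_low_mark pair used here and in cfhsi_tx_done() forms a simple hysteresis: flow control goes OFF once the TX queue length exceeds the high mark and only comes back ON after it drops to the low mark. A standalone sketch of that logic (queue-length samples are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct flow_ctrl {
	unsigned int q_high_mark;	/* assert flow-off above this */
	unsigned int q_low_mark;	/* de-assert flow-off at or below this */
	bool flow_off_sent;
};

/* Returns true when the flow state changed for the given queue length. */
static bool flow_update(struct flow_ctrl *fc, unsigned int qlen)
{
	if (!fc->flow_off_sent && qlen > fc->q_high_mark) {
		fc->flow_off_sent = true;	/* would call cfdev.flowctrl(ndev, OFF) */
		return true;
	}
	if (fc->flow_off_sent && qlen <= fc->q_low_mark) {
		fc->flow_off_sent = false;	/* would call cfdev.flowctrl(ndev, ON) */
		return true;
	}
	return false;
}

int main(void)
{
	struct flow_ctrl fc = { .q_high_mark = 100, .q_low_mark = 50 };
	unsigned int samples[] = { 80, 120, 90, 60, 50, 40 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		if (flow_update(&fc, samples[i]))
			printf("qlen=%u -> flow %s\n", samples[i],
			       fc.flow_off_sent ? "OFF" : "ON");
	return 0;
}

Only the 120 and 50 samples change the state, which is the point of the two-threshold scheme.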
@@ -1114,9 +1096,9 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
WARN_ON(!len);
/* Set up new transfer. */
- res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
__func__, res);
cfhsi_abort_tx(cfhsi);
}
@@ -1129,19 +1111,19 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static const struct net_device_ops cfhsi_ops;
+static const struct net_device_ops cfhsi_netdevops;
static void cfhsi_setup(struct net_device *dev)
{
int i;
struct cfhsi *cfhsi = netdev_priv(dev);
dev->features = 0;
- dev->netdev_ops = &cfhsi_ops;
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
dev->tx_queue_len = 0;
dev->destructor = free_netdev;
+ dev->netdev_ops = &cfhsi_netdevops;
for (i = 0; i < CFHSI_PRIO_LAST; ++i)
skb_queue_head_init(&cfhsi->qhead[i]);
cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
@@ -1149,42 +1131,7 @@ static void cfhsi_setup(struct net_device *dev)
cfhsi->cfdev.use_stx = false;
cfhsi->cfdev.use_fcs = false;
cfhsi->ndev = dev;
-}
-
-int cfhsi_probe(struct platform_device *pdev)
-{
- struct cfhsi *cfhsi = NULL;
- struct net_device *ndev;
-
- int res;
-
- ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
- if (!ndev)
- return -ENODEV;
-
- cfhsi = netdev_priv(ndev);
- cfhsi->ndev = ndev;
- cfhsi->pdev = pdev;
-
- /* Assign the HSI device. */
- cfhsi->dev = pdev->dev.platform_data;
-
- /* Assign the driver to this HSI device. */
- cfhsi->dev->drv = &cfhsi->drv;
-
- /* Register network device. */
- res = register_netdev(ndev);
- if (res) {
- dev_err(&ndev->dev, "%s: Registration error: %d.\n",
- __func__, res);
- free_netdev(ndev);
- }
- /* Add CAIF HSI device to list. */
- spin_lock(&cfhsi_list_lock);
- list_add_tail(&cfhsi->list, &cfhsi_list);
- spin_unlock(&cfhsi_list_lock);
-
- return res;
+ cfhsi->cfg = hsi_default_config;
}
static int cfhsi_open(struct net_device *ndev)
@@ -1200,9 +1147,6 @@ static int cfhsi_open(struct net_device *ndev)
/* Set flow info */
cfhsi->flow_off_sent = 0;
- cfhsi->q_low_mark = LOW_WATER_MARK;
- cfhsi->q_high_mark = HIGH_WATER_MARK;
-
/*
* Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1230,20 +1174,8 @@ static int cfhsi_open(struct net_device *ndev)
goto err_alloc_rx_flip;
}
- /* Pre-calculate inactivity timeout. */
- if (inactivity_timeout != -1) {
- cfhsi->inactivity_timeout =
- inactivity_timeout * HZ / 1000;
- if (!cfhsi->inactivity_timeout)
- cfhsi->inactivity_timeout = 1;
- else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
- cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
- } else {
- cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
- }
-
/* Initialize aggregation timeout */
- cfhsi->aggregation_timeout = aggregation_timeout;
+ cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
/* Initialize receive variables. */
cfhsi->rx_ptr = cfhsi->rx_buf;
@@ -1253,10 +1185,10 @@ static int cfhsi_open(struct net_device *ndev)
spin_lock_init(&cfhsi->lock);
/* Set up the driver. */
- cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
- cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
- cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
- cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
+ cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
+ cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
+ cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
+ cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
/* Initialize the work queues. */
INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
@@ -1270,9 +1202,9 @@ static int cfhsi_open(struct net_device *ndev)
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
/* Create work thread. */
- cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
+ cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
if (!cfhsi->wq) {
- dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
+ netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
__func__);
res = -ENODEV;
goto err_create_wq;
@@ -1297,9 +1229,9 @@ static int cfhsi_open(struct net_device *ndev)
cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
/* Activate HSI interface. */
- res = cfhsi->dev->cfhsi_up(cfhsi->dev);
+ res = cfhsi->ops->cfhsi_up(cfhsi->ops);
if (res) {
- dev_err(&cfhsi->ndev->dev,
+ netdev_err(cfhsi->ndev,
"%s: can't activate HSI interface: %d.\n",
__func__, res);
goto err_activate;
@@ -1308,14 +1240,14 @@ static int cfhsi_open(struct net_device *ndev)
/* Flush FIFO */
res = cfhsi_flush_fifo(cfhsi);
if (res) {
- dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
+ netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
__func__, res);
goto err_net_reg;
}
return res;
err_net_reg:
- cfhsi->dev->cfhsi_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_down(cfhsi->ops);
err_activate:
destroy_workqueue(cfhsi->wq);
err_create_wq:
@@ -1345,7 +1277,7 @@ static int cfhsi_close(struct net_device *ndev)
del_timer_sync(&cfhsi->aggregation_timer);
/* Cancel pending RX request (if any) */
- cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
+ cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
/* Destroy workqueue */
destroy_workqueue(cfhsi->wq);
@@ -1358,7 +1290,7 @@ static int cfhsi_close(struct net_device *ndev)
cfhsi_abort_tx(cfhsi);
/* Deactivate interface */
- cfhsi->dev->cfhsi_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_down(cfhsi->ops);
/* Free buffers. */
kfree(tx_buf);
@@ -1367,85 +1299,184 @@ static int cfhsi_close(struct net_device *ndev)
return 0;
}
-static const struct net_device_ops cfhsi_ops = {
+static void cfhsi_uninit(struct net_device *dev)
+{
+ struct cfhsi *cfhsi = netdev_priv(dev);
+ ASSERT_RTNL();
+ symbol_put(cfhsi_get_device);
+ list_del(&cfhsi->list);
+}
+
+static const struct net_device_ops cfhsi_netdevops = {
+ .ndo_uninit = cfhsi_uninit,
.ndo_open = cfhsi_open,
.ndo_stop = cfhsi_close,
.ndo_start_xmit = cfhsi_xmit
};
-int cfhsi_remove(struct platform_device *pdev)
+static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
- struct list_head *list_node;
- struct list_head *n;
- struct cfhsi *cfhsi = NULL;
- struct cfhsi_dev *dev;
+ int i;
- dev = (struct cfhsi_dev *)pdev->dev.platform_data;
- spin_lock(&cfhsi_list_lock);
- list_for_each_safe(list_node, n, &cfhsi_list) {
- cfhsi = list_entry(list_node, struct cfhsi, list);
- /* Find the corresponding device. */
- if (cfhsi->dev == dev) {
- /* Remove from list. */
- list_del(list_node);
- spin_unlock(&cfhsi_list_lock);
- return 0;
- }
+ if (!data) {
+ pr_debug("no params data found\n");
+ return;
}
- spin_unlock(&cfhsi_list_lock);
- return -ENODEV;
+
+ i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
+ /*
+ * Inactivity timeout in millisecs. Lowest possible value is 1,
+ * and highest possible is NEXT_TIMER_MAX_DELTA.
+ */
+ if (data[i]) {
+ u32 inactivity_timeout = nla_get_u32(data[i]);
+ /* Pre-calculate inactivity timeout. */
+ cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
+ if (cfhsi->cfg.inactivity_timeout == 0)
+ cfhsi->cfg.inactivity_timeout = 1;
+ else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
+ cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+ }
+
+ i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
+ if (data[i])
+ cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_HEAD_ALIGN;
+ if (data[i])
+ cfhsi->cfg.head_align = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_TAIL_ALIGN;
+ if (data[i])
+ cfhsi->cfg.tail_align = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
+ if (data[i])
+ cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
+ if (data[i])
+ cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
+}
+
+static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ cfhsi_netlink_parms(data, netdev_priv(dev));
+ netdev_state_change(dev);
+ return 0;
}
-struct platform_driver cfhsi_plat_drv = {
- .probe = cfhsi_probe,
- .remove = cfhsi_remove,
- .driver = {
- .name = "cfhsi",
- .owner = THIS_MODULE,
- },
+static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
+ [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};
-static void __exit cfhsi_exit_module(void)
+static size_t caif_hsi_get_size(const struct net_device *dev)
+{
+ int i;
+ size_t s = 0;
+ for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
+ s += nla_total_size(caif_hsi_policy[i].len);
+ return s;
+}
+
+static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct cfhsi *cfhsi = netdev_priv(dev);
+
+ if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
+ cfhsi->cfg.inactivity_timeout) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
+ cfhsi->cfg.aggregation_timeout) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
+ cfhsi->cfg.head_align) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
+ cfhsi->cfg.tail_align) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
+ cfhsi->cfg.q_high_mark) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
+ cfhsi->cfg.q_low_mark))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
{
- struct list_head *list_node;
- struct list_head *n;
struct cfhsi *cfhsi = NULL;
+ struct cfhsi_ops *(*get_ops)(void);
- spin_lock(&cfhsi_list_lock);
- list_for_each_safe(list_node, n, &cfhsi_list) {
- cfhsi = list_entry(list_node, struct cfhsi, list);
+ ASSERT_RTNL();
- /* Remove from list. */
- list_del(list_node);
- spin_unlock(&cfhsi_list_lock);
+ cfhsi = netdev_priv(dev);
+ cfhsi_netlink_parms(data, cfhsi);
+ dev_net_set(cfhsi->ndev, src_net);
+
+ get_ops = symbol_get(cfhsi_get_ops);
+ if (!get_ops) {
+ pr_err("%s: failed to get the cfhsi_ops\n", __func__);
+ return -ENODEV;
+ }
- unregister_netdevice(cfhsi->ndev);
+ /* Assign the HSI device. */
+ cfhsi->ops = (*get_ops)();
+ if (!cfhsi->ops) {
+ pr_err("%s: failed to get the cfhsi_ops\n", __func__);
+ goto err;
+ }
- spin_lock(&cfhsi_list_lock);
+ /* Assign the driver to this HSI device. */
+ cfhsi->ops->cb_ops = &cfhsi->cb_ops;
+ if (register_netdevice(dev)) {
+ pr_warn("%s: caif_hsi device registration failed\n", __func__);
+ goto err;
}
- spin_unlock(&cfhsi_list_lock);
+ /* Add CAIF HSI device to list. */
+ list_add_tail(&cfhsi->list, &cfhsi_list);
- /* Unregister platform driver. */
- platform_driver_unregister(&cfhsi_plat_drv);
+ return 0;
+err:
+ symbol_put(cfhsi_get_ops);
+ return -ENODEV;
}
-static int __init cfhsi_init_module(void)
+static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
+ .kind = "cfhsi",
+ .priv_size = sizeof(struct cfhsi),
+ .setup = cfhsi_setup,
+ .maxtype = __IFLA_CAIF_HSI_MAX,
+ .policy = caif_hsi_policy,
+ .newlink = caif_hsi_newlink,
+ .changelink = caif_hsi_changelink,
+ .get_size = caif_hsi_get_size,
+ .fill_info = caif_hsi_fill_info,
+};
+
+static void __exit cfhsi_exit_module(void)
{
- int result;
+ struct list_head *list_node;
+ struct list_head *n;
+ struct cfhsi *cfhsi;
- /* Initialize spin lock. */
- spin_lock_init(&cfhsi_list_lock);
+ rtnl_link_unregister(&caif_hsi_link_ops);
- /* Register platform driver. */
- result = platform_driver_register(&cfhsi_plat_drv);
- if (result) {
- printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
- result);
- goto err_dev_register;
+ rtnl_lock();
+ list_for_each_safe(list_node, n, &cfhsi_list) {
+ cfhsi = list_entry(list_node, struct cfhsi, list);
+ unregister_netdev(cfhsi->ndev);
}
+ rtnl_unlock();
+}
- err_dev_register:
- return result;
+static int __init cfhsi_init_module(void)
+{
+ return rtnl_link_register(&caif_hsi_link_ops);
}
module_init(cfhsi_init_module);
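The changelink/newlink path above converts the __IFLA_CAIF_HSI_INACTIVITY_TOUT value from milliseconds to jiffies and clamps it to the range [1, NEXT_TIMER_MAX_DELTA]. The same conversion as a standalone sketch; HZ and the upper bound are hard-coded stand-ins here, not the kernel's real values:

#include <stdio.h>

#define HZ			250UL			/* assumed tick rate, for illustration */
#define NEXT_TIMER_MAX_DELTA	((1UL << 30) - 1)	/* illustrative upper bound */

static unsigned long inactivity_ms_to_ticks(unsigned long ms)
{
	unsigned long t = ms * HZ / 1000;

	if (t == 0)				/* a zero timeout is never allowed */
		t = 1;
	else if (t > NEXT_TIMER_MAX_DELTA)	/* cap oversized requests */
		t = NEXT_TIMER_MAX_DELTA;
	return t;
}

int main(void)
{
	/* 1 ms rounds down to 0 ticks and is bumped to 1; 1000 ms becomes HZ ticks. */
	printf("%lu %lu\n", inactivity_ms_to_ticks(1), inactivity_ms_to_ticks(1000));
	return 0;
}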
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 3f88473..ea31438 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -597,7 +597,7 @@ static int __devinit bfin_can_probe(struct platform_device *pdev)
dev_info(&pdev->dev,
"%s device registered"
"(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
- DRV_NAME, (void *)priv->membase, priv->rx_irq,
+ DRV_NAME, priv->membase, priv->rx_irq,
priv->tx_irq, priv->err_irq, priv->can.clock.freq);
return 0;
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index ffb9773..3b83baf 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -1,15 +1,23 @@
menuconfig CAN_C_CAN
- tristate "Bosch C_CAN devices"
+ tristate "Bosch C_CAN/D_CAN devices"
depends on CAN_DEV && HAS_IOMEM
if CAN_C_CAN
config CAN_C_CAN_PLATFORM
- tristate "Generic Platform Bus based C_CAN driver"
+ tristate "Generic Platform Bus based C_CAN/D_CAN driver"
---help---
- This driver adds support for the C_CAN chips connected to
- the "platform bus" (Linux abstraction for directly to the
+ This driver adds support for the C_CAN/D_CAN chips connected
+ to the "platform bus" (Linux abstraction for directly to the
processor attached devices) which can be found on various
- boards from ST Microelectronics (http://www.st.com)
- like the SPEAr1310 and SPEAr320 evaluation boards.
+ boards from ST Microelectronics (http://www.st.com) like the
+ SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
+ boards like am335x, dm814x, dm813x and dm811x.
+
+config CAN_C_CAN_PCI
+ tristate "Generic PCI Bus based C_CAN/D_CAN driver"
+ depends on PCI
+ ---help---
+ This driver adds support for the C_CAN/D_CAN chips connected
+ to the PCI bus.
endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
index 9273f6d..ad1cc84 100644
--- a/drivers/net/can/c_can/Makefile
+++ b/drivers/net/can/c_can/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_CAN_C_CAN) += c_can.o
obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 536bda0..eea6608 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -41,6 +41,10 @@
#include "c_can.h"
+/* Number of interface registers */
+#define IF_ENUM_REG_LEN 11
+#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
+
/* control register */
#define CONTROL_TEST BIT(7)
#define CONTROL_CCE BIT(6)
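The C_CAN_IFACE() macro above replaces the old &priv->regs->ifregs[iface] pointer arithmetic: IF1 and IF2 occupy two equally sized blocks of IF_ENUM_REG_LEN entries in the new register enum, so the IF2 index is the IF1 index plus one block. A standalone sketch with a trimmed-down enum (only the values needed for the demonstration, matching the layout in c_can.h):

#include <stdio.h>

/* Reduced version of enum reg from c_can.h; only the indices used below. */
enum reg {
	C_CAN_IF1_COMREQ_REG = 7,
	C_CAN_IF2_COMREQ_REG = 18,
};

#define IF_ENUM_REG_LEN	11
#define C_CAN_IFACE(reg, iface)	(C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)

int main(void)
{
	/* iface 0 selects the IF1 block, iface 1 the IF2 block. */
	printf("IF1 COMREQ index: %d\n", C_CAN_IFACE(COMREQ_REG, 0));
	printf("IF2 COMREQ index: %d\n", C_CAN_IFACE(COMREQ_REG, 1));
	printf("matches enum:     %d\n",
	       C_CAN_IFACE(COMREQ_REG, 1) == C_CAN_IF2_COMREQ_REG);
	return 0;
}

The resulting index is then turned into a byte offset through the per-variant reg_map_c_can[]/reg_map_d_can[] tables.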
@@ -209,10 +213,10 @@ static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
C_CAN_MSG_OBJ_TX_FIRST;
}
-static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
+static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
{
- u32 val = priv->read_reg(priv, reg);
- val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
+ u32 val = priv->read_reg(priv, index);
+ val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
return val;
}
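c_can_read_reg32() now takes an enum index instead of a pointer and builds a 32-bit value from two consecutive 16-bit registers, with index + 1 holding the high half. A standalone sketch of the same combination over an in-memory register array (contents are arbitrary test values):

#include <stdint.h>
#include <stdio.h>

/* Fake 16-bit register file indexed like the driver's enum reg. */
static uint16_t regs[8] = { [4] = 0xBEEF, [5] = 0xDEAD };

static uint16_t read_reg(unsigned int index)
{
	return regs[index];
}

/* Low half at 'index', high half at 'index + 1', as in c_can_read_reg32(). */
static uint32_t read_reg32(unsigned int index)
{
	uint32_t val = read_reg(index);

	val |= (uint32_t)read_reg(index + 1) << 16;
	return val;
}

int main(void)
{
	printf("0x%08X\n", (unsigned int)read_reg32(4));	/* prints 0xDEADBEEF */
	return 0;
}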
@@ -220,14 +224,14 @@ static void c_can_enable_all_interrupts(struct c_can_priv *priv,
int enable)
{
unsigned int cntrl_save = priv->read_reg(priv,
- &priv->regs->control);
+ C_CAN_CTRL_REG);
if (enable)
cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
else
cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
- priv->write_reg(priv, &priv->regs->control, cntrl_save);
+ priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
}
static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
@@ -235,7 +239,7 @@ static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
int count = MIN_TIMEOUT_VALUE;
while (count && priv->read_reg(priv,
- &priv->regs->ifregs[iface].com_req) &
+ C_CAN_IFACE(COMREQ_REG, iface)) &
IF_COMR_BUSY) {
count--;
udelay(1);
@@ -258,9 +262,9 @@ static inline void c_can_object_get(struct net_device *dev,
* register and message RAM must be complete in 6 CAN-CLK
* period.
*/
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
IFX_WRITE_LOW_16BIT(objno));
if (c_can_msg_obj_is_busy(priv, iface))
@@ -278,9 +282,9 @@ static inline void c_can_object_put(struct net_device *dev,
* register and message RAM must be complete in 6 CAN-CLK
* period.
*/
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
(IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
IFX_WRITE_LOW_16BIT(objno));
if (c_can_msg_obj_is_busy(priv, iface))
@@ -306,18 +310,18 @@ static void c_can_write_msg_object(struct net_device *dev,
flags |= IF_ARB_MSGVAL;
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
IFX_WRITE_HIGH_16BIT(id));
for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
+ priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
frame->data[i] | (frame->data[i + 1] << 8));
}
/* enable interrupt for this message object */
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
frame->can_dlc);
c_can_object_put(dev, iface, objno, IF_COMM_ALL);
@@ -329,7 +333,7 @@ static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
@@ -343,7 +347,7 @@ static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
struct c_can_priv *priv = netdev_priv(dev);
for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
ctrl_mask & ~(IF_MCONT_MSGLST |
IF_MCONT_INTPND | IF_MCONT_NEWDAT));
c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
@@ -356,7 +360,7 @@ static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
ctrl_mask & ~(IF_MCONT_MSGLST |
IF_MCONT_INTPND | IF_MCONT_NEWDAT));
c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
@@ -374,7 +378,7 @@ static void c_can_handle_lost_msg_obj(struct net_device *dev,
c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
IF_MCONT_CLR_MSGLST);
c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
@@ -410,8 +414,8 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
frame->can_dlc = get_can_dlc(ctrl & 0x0F);
- flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
- val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
+ flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
+ val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
(flags << 16);
if (flags & IF_ARB_MSGXTD)
@@ -424,7 +428,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
else {
for (i = 0; i < frame->can_dlc; i += 2) {
data = priv->read_reg(priv,
- &priv->regs->ifregs[iface].data[i / 2]);
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
}
@@ -444,40 +448,40 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+ priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
- priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+ priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
IFX_WRITE_HIGH_16BIT(mask));
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
(IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
- c_can_read_reg32(priv, &priv->regs->msgval1));
+ c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
}
static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
- c_can_read_reg32(priv, &priv->regs->msgval1));
+ c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
}
static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
{
- int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+ int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
/*
* as transmission request register's bit n-1 corresponds to
@@ -540,12 +544,12 @@ static int c_can_set_bittiming(struct net_device *dev)
netdev_info(dev,
"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
- ctrl_save = priv->read_reg(priv, &priv->regs->control);
- priv->write_reg(priv, &priv->regs->control,
+ ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
+ priv->write_reg(priv, C_CAN_CTRL_REG,
ctrl_save | CONTROL_CCE | CONTROL_INIT);
- priv->write_reg(priv, &priv->regs->btr, reg_btr);
- priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
- priv->write_reg(priv, &priv->regs->control, ctrl_save);
+ priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
+ priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
+ priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
return 0;
}
@@ -587,36 +591,36 @@ static void c_can_chip_config(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);
/* enable automatic retransmission */
- priv->write_reg(priv, &priv->regs->control,
+ priv->write_reg(priv, C_CAN_CTRL_REG,
CONTROL_ENABLE_AR);
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
- CAN_CTRLMODE_LOOPBACK)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
/* loopback + silent mode : useful for hot self-test */
- priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, &priv->regs->test,
+ priv->write_reg(priv, C_CAN_TEST_REG,
TEST_LBACK | TEST_SILENT);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
/* loopback mode : useful for self-test function */
- priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
+ priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
/* silent mode : bus-monitoring mode */
- priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
+ priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
} else
/* normal mode*/
- priv->write_reg(priv, &priv->regs->control,
+ priv->write_reg(priv, C_CAN_CTRL_REG,
CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
/* configure message objects */
c_can_configure_msg_objects(dev);
/* set a `lec` value so that we can check for updates later */
- priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
/* set bittiming params */
c_can_set_bittiming(dev);
@@ -669,7 +673,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
unsigned int reg_err_counter;
struct c_can_priv *priv = netdev_priv(dev);
- reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
ERR_CNT_REC_SHIFT;
bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
@@ -686,7 +690,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
*
* We iterate from priv->tx_echo to priv->tx_next and check if the
* packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted package, stop looking for more.
+ * If we discover a not yet transmitted packet, stop looking for more.
*/
static void c_can_do_tx(struct net_device *dev)
{
@@ -697,15 +701,17 @@ static void c_can_do_tx(struct net_device *dev)
for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
msg_obj_no = get_tx_echo_msg_obj(priv);
- val = c_can_read_reg32(priv, &priv->regs->txrqst1);
- if (!(val & (1 << msg_obj_no))) {
+ val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
+ if (!(val & (1 << (msg_obj_no - 1)))) {
can_get_echo_skb(dev,
msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
stats->tx_bytes += priv->read_reg(priv,
- &priv->regs->ifregs[0].msg_cntrl)
+ C_CAN_IFACE(MSGCTRL_REG, 0))
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
c_can_inval_msg_object(dev, 0, msg_obj_no);
+ } else {
+ break;
}
}
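The fixed test above masks with 1 << (msg_obj_no - 1) because bit n-1 of the TXRQST (and INTPND) registers tracks message object n; objects are numbered from 1. A standalone sketch of that off-by-one mapping (the register value is arbitrary):

#include <stdio.h>

/* Message objects are numbered from 1; bit n-1 reports object n. */
static int msg_obj_pending(unsigned int txrqst, unsigned int msg_obj_no)
{
	return !!(txrqst & (1u << (msg_obj_no - 1)));
}

int main(void)
{
	unsigned int txrqst = 0x5;	/* objects 1 and 3 still have TX requests */
	unsigned int obj;

	for (obj = 1; obj <= 4; obj++)
		printf("object %u pending: %d\n", obj,
		       msg_obj_pending(txrqst, obj));
	return 0;
}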
@@ -742,11 +748,11 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
u32 num_rx_pkts = 0;
unsigned int msg_obj, msg_ctrl_save;
struct c_can_priv *priv = netdev_priv(dev);
- u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
+ u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);
for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
- val = c_can_read_reg32(priv, &priv->regs->intpnd1),
+ val = c_can_read_reg32(priv, C_CAN_INTPND1_REG),
msg_obj++) {
/*
* as interrupt pending register's bit n-1 corresponds to
@@ -756,7 +762,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
~IF_COMM_TXRQST);
msg_ctrl_save = priv->read_reg(priv,
- &priv->regs->ifregs[0].msg_cntrl);
+ C_CAN_IFACE(MSGCTRL_REG, 0));
if (msg_ctrl_save & IF_MCONT_EOB)
return num_rx_pkts;
@@ -817,7 +823,7 @@ static int c_can_handle_state_change(struct net_device *dev,
return 0;
c_can_get_berr_counter(dev, &bec);
- reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
ERR_CNT_RP_SHIFT;
@@ -933,7 +939,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
}
/* set a `lec` value so that we can check for updates later */
- priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
netif_receive_skb(skb);
stats->rx_packets++;
@@ -950,22 +956,22 @@ static int c_can_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev;
struct c_can_priv *priv = netdev_priv(dev);
- irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ irqstatus = priv->irqstatus;
if (!irqstatus)
goto end;
/* status events have the highest priority */
if (irqstatus == STATUS_INTERRUPT) {
priv->current_status = priv->read_reg(priv,
- &priv->regs->status);
+ C_CAN_STS_REG);
/* handle Tx/Rx events */
if (priv->current_status & STATUS_TXOK)
- priv->write_reg(priv, &priv->regs->status,
+ priv->write_reg(priv, C_CAN_STS_REG,
priv->current_status & ~STATUS_TXOK);
if (priv->current_status & STATUS_RXOK)
- priv->write_reg(priv, &priv->regs->status,
+ priv->write_reg(priv, C_CAN_STS_REG,
priv->current_status & ~STATUS_RXOK);
/* handle state changes */
@@ -1028,12 +1034,11 @@ end:
static irqreturn_t c_can_isr(int irq, void *dev_id)
{
- u16 irqstatus;
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);
- irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
- if (!irqstatus)
+ priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG);
+ if (!priv->irqstatus)
return IRQ_NONE;
/* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1068,11 @@ static int c_can_open(struct net_device *dev)
goto exit_irq_fail;
}
+ napi_enable(&priv->napi);
+
/* start the c_can controller */
c_can_start(dev);
- napi_enable(&priv->napi);
netif_start_queue(dev);
return 0;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 9b7fbef..01a7049 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,43 +22,129 @@
#ifndef C_CAN_H
#define C_CAN_H
-/* c_can IF registers */
-struct c_can_if_regs {
- u16 com_req;
- u16 com_mask;
- u16 mask1;
- u16 mask2;
- u16 arb1;
- u16 arb2;
- u16 msg_cntrl;
- u16 data[4];
- u16 _reserved[13];
+enum reg {
+ C_CAN_CTRL_REG = 0,
+ C_CAN_STS_REG,
+ C_CAN_ERR_CNT_REG,
+ C_CAN_BTR_REG,
+ C_CAN_INT_REG,
+ C_CAN_TEST_REG,
+ C_CAN_BRPEXT_REG,
+ C_CAN_IF1_COMREQ_REG,
+ C_CAN_IF1_COMMSK_REG,
+ C_CAN_IF1_MASK1_REG,
+ C_CAN_IF1_MASK2_REG,
+ C_CAN_IF1_ARB1_REG,
+ C_CAN_IF1_ARB2_REG,
+ C_CAN_IF1_MSGCTRL_REG,
+ C_CAN_IF1_DATA1_REG,
+ C_CAN_IF1_DATA2_REG,
+ C_CAN_IF1_DATA3_REG,
+ C_CAN_IF1_DATA4_REG,
+ C_CAN_IF2_COMREQ_REG,
+ C_CAN_IF2_COMMSK_REG,
+ C_CAN_IF2_MASK1_REG,
+ C_CAN_IF2_MASK2_REG,
+ C_CAN_IF2_ARB1_REG,
+ C_CAN_IF2_ARB2_REG,
+ C_CAN_IF2_MSGCTRL_REG,
+ C_CAN_IF2_DATA1_REG,
+ C_CAN_IF2_DATA2_REG,
+ C_CAN_IF2_DATA3_REG,
+ C_CAN_IF2_DATA4_REG,
+ C_CAN_TXRQST1_REG,
+ C_CAN_TXRQST2_REG,
+ C_CAN_NEWDAT1_REG,
+ C_CAN_NEWDAT2_REG,
+ C_CAN_INTPND1_REG,
+ C_CAN_INTPND2_REG,
+ C_CAN_MSGVAL1_REG,
+ C_CAN_MSGVAL2_REG,
};
-/* c_can hardware registers */
-struct c_can_regs {
- u16 control;
- u16 status;
- u16 err_cnt;
- u16 btr;
- u16 interrupt;
- u16 test;
- u16 brp_ext;
- u16 _reserved1;
- struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
- u16 _reserved2[8];
- u16 txrqst1;
- u16 txrqst2;
- u16 _reserved3[6];
- u16 newdat1;
- u16 newdat2;
- u16 _reserved4[6];
- u16 intpnd1;
- u16 intpnd2;
- u16 _reserved5[6];
- u16 msgval1;
- u16 msgval2;
- u16 _reserved6[6];
+static const u16 reg_map_c_can[] = {
+ [C_CAN_CTRL_REG] = 0x00,
+ [C_CAN_STS_REG] = 0x02,
+ [C_CAN_ERR_CNT_REG] = 0x04,
+ [C_CAN_BTR_REG] = 0x06,
+ [C_CAN_INT_REG] = 0x08,
+ [C_CAN_TEST_REG] = 0x0A,
+ [C_CAN_BRPEXT_REG] = 0x0C,
+ [C_CAN_IF1_COMREQ_REG] = 0x10,
+ [C_CAN_IF1_COMMSK_REG] = 0x12,
+ [C_CAN_IF1_MASK1_REG] = 0x14,
+ [C_CAN_IF1_MASK2_REG] = 0x16,
+ [C_CAN_IF1_ARB1_REG] = 0x18,
+ [C_CAN_IF1_ARB2_REG] = 0x1A,
+ [C_CAN_IF1_MSGCTRL_REG] = 0x1C,
+ [C_CAN_IF1_DATA1_REG] = 0x1E,
+ [C_CAN_IF1_DATA2_REG] = 0x20,
+ [C_CAN_IF1_DATA3_REG] = 0x22,
+ [C_CAN_IF1_DATA4_REG] = 0x24,
+ [C_CAN_IF2_COMREQ_REG] = 0x40,
+ [C_CAN_IF2_COMMSK_REG] = 0x42,
+ [C_CAN_IF2_MASK1_REG] = 0x44,
+ [C_CAN_IF2_MASK2_REG] = 0x46,
+ [C_CAN_IF2_ARB1_REG] = 0x48,
+ [C_CAN_IF2_ARB2_REG] = 0x4A,
+ [C_CAN_IF2_MSGCTRL_REG] = 0x4C,
+ [C_CAN_IF2_DATA1_REG] = 0x4E,
+ [C_CAN_IF2_DATA2_REG] = 0x50,
+ [C_CAN_IF2_DATA3_REG] = 0x52,
+ [C_CAN_IF2_DATA4_REG] = 0x54,
+ [C_CAN_TXRQST1_REG] = 0x80,
+ [C_CAN_TXRQST2_REG] = 0x82,
+ [C_CAN_NEWDAT1_REG] = 0x90,
+ [C_CAN_NEWDAT2_REG] = 0x92,
+ [C_CAN_INTPND1_REG] = 0xA0,
+ [C_CAN_INTPND2_REG] = 0xA2,
+ [C_CAN_MSGVAL1_REG] = 0xB0,
+ [C_CAN_MSGVAL2_REG] = 0xB2,
+};
+
+static const u16 reg_map_d_can[] = {
+ [C_CAN_CTRL_REG] = 0x00,
+ [C_CAN_STS_REG] = 0x04,
+ [C_CAN_ERR_CNT_REG] = 0x08,
+ [C_CAN_BTR_REG] = 0x0C,
+ [C_CAN_BRPEXT_REG] = 0x0E,
+ [C_CAN_INT_REG] = 0x10,
+ [C_CAN_TEST_REG] = 0x14,
+ [C_CAN_TXRQST1_REG] = 0x88,
+ [C_CAN_TXRQST2_REG] = 0x8A,
+ [C_CAN_NEWDAT1_REG] = 0x9C,
+ [C_CAN_NEWDAT2_REG] = 0x9E,
+ [C_CAN_INTPND1_REG] = 0xB0,
+ [C_CAN_INTPND2_REG] = 0xB2,
+ [C_CAN_MSGVAL1_REG] = 0xC4,
+ [C_CAN_MSGVAL2_REG] = 0xC6,
+ [C_CAN_IF1_COMREQ_REG] = 0x100,
+ [C_CAN_IF1_COMMSK_REG] = 0x102,
+ [C_CAN_IF1_MASK1_REG] = 0x104,
+ [C_CAN_IF1_MASK2_REG] = 0x106,
+ [C_CAN_IF1_ARB1_REG] = 0x108,
+ [C_CAN_IF1_ARB2_REG] = 0x10A,
+ [C_CAN_IF1_MSGCTRL_REG] = 0x10C,
+ [C_CAN_IF1_DATA1_REG] = 0x110,
+ [C_CAN_IF1_DATA2_REG] = 0x112,
+ [C_CAN_IF1_DATA3_REG] = 0x114,
+ [C_CAN_IF1_DATA4_REG] = 0x116,
+ [C_CAN_IF2_COMREQ_REG] = 0x120,
+ [C_CAN_IF2_COMMSK_REG] = 0x122,
+ [C_CAN_IF2_MASK1_REG] = 0x124,
+ [C_CAN_IF2_MASK2_REG] = 0x126,
+ [C_CAN_IF2_ARB1_REG] = 0x128,
+ [C_CAN_IF2_ARB2_REG] = 0x12A,
+ [C_CAN_IF2_MSGCTRL_REG] = 0x12C,
+ [C_CAN_IF2_DATA1_REG] = 0x130,
+ [C_CAN_IF2_DATA2_REG] = 0x132,
+ [C_CAN_IF2_DATA3_REG] = 0x134,
+ [C_CAN_IF2_DATA4_REG] = 0x136,
+};
+
+enum c_can_dev_id {
+ C_CAN_DEVTYPE,
+ D_CAN_DEVTYPE,
};
/* c_can private data structure */
@@ -69,13 +155,15 @@ struct c_can_priv {
int tx_object;
int current_status;
int last_status;
- u16 (*read_reg) (struct c_can_priv *priv, void *reg);
- void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
- struct c_can_regs __iomem *regs;
+ u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
+ void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
+ void __iomem *base;
+ const u16 *regs;
unsigned long irq_flags; /* for request_irq() */
unsigned int tx_next;
unsigned int tx_echo;
void *priv; /* for board-specific data */
+ u16 irqstatus;
};
struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
new file mode 100644
index 0000000..1011146
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -0,0 +1,221 @@
+/*
+ * PCI bus driver for Bosch C_CAN/D_CAN controller
+ *
+ * Copyright (C) 2012 Federico Vaga <federico.vaga@gmail.com>
+ *
+ * Borrowed from c_can_platform.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+enum c_can_pci_reg_align {
+ C_CAN_REG_ALIGN_16,
+ C_CAN_REG_ALIGN_32,
+};
+
+struct c_can_pci_data {
+ /* Specify whether the controller is a C_CAN or a D_CAN */
+ enum c_can_dev_id type;
+ /* Register alignment in memory */
+ enum c_can_pci_reg_align reg_align;
+ /* Clock frequency */
+ unsigned int freq;
+};
+
+/*
+ * 16-bit c_can registers can be arranged differently in the memory
+ * architecture of different implementations. For example: 16-bit
+ * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
+ * Handle the same by providing a common read/write interface.
+ */
+static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+ enum reg index)
+{
+ return readw(priv->base + priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ writew(val, priv->base + priv->regs[index]);
+}
+
+static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+ enum reg index)
+{
+ return readw(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ writew(val, priv->base + 2 * priv->regs[index]);
+}
+
+static int __devinit c_can_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
+ struct c_can_priv *priv;
+ struct net_device *dev;
+ void __iomem *addr;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_device FAILED\n");
+ goto out;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_request_regions FAILED\n");
+ goto out_disable_device;
+ }
+
+ pci_set_master(pdev);
+ pci_enable_msi(pdev);
+
+ addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!addr) {
+ dev_err(&pdev->dev,
+ "device has no PCI memory resources, "
+ "failing adapter\n");
+ ret = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ /* allocate the c_can device */
+ dev = alloc_c_can_dev();
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ priv = netdev_priv(dev);
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ dev->irq = pdev->irq;
+ priv->base = addr;
+
+ if (!c_can_pci_data->freq) {
+ dev_err(&pdev->dev, "no clock frequency defined\n");
+ ret = -ENODEV;
+ goto out_free_c_can;
+ } else {
+ priv->can.clock.freq = c_can_pci_data->freq;
+ }
+
+ /* Configure CAN type */
+ switch (c_can_pci_data->type) {
+ case C_CAN_DEVTYPE:
+ priv->regs = reg_map_c_can;
+ break;
+ case D_CAN_DEVTYPE:
+ priv->regs = reg_map_d_can;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_free_c_can;
+ }
+
+ /* Configure access to registers */
+ switch (c_can_pci_data->reg_align) {
+ case C_CAN_REG_ALIGN_32:
+ priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
+ priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
+ break;
+ case C_CAN_REG_ALIGN_16:
+ priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_free_c_can;
+ }
+
+ ret = register_c_can_dev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+ KBUILD_MODNAME, ret);
+ goto out_free_c_can;
+ }
+
+ dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+ KBUILD_MODNAME, priv->regs, dev->irq);
+
+ return 0;
+
+out_free_c_can:
+ pci_set_drvdata(pdev, NULL);
+ free_c_can_dev(dev);
+out_iounmap:
+ pci_iounmap(pdev, addr);
+out_release_regions:
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+out_disable_device:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+static void __devexit c_can_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ unregister_c_can_dev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+ free_c_can_dev(dev);
+
+ pci_iounmap(pdev, priv->base);
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct c_can_pci_data c_can_sta2x11 = {
+ .type = C_CAN_DEVTYPE,
+ .reg_align = C_CAN_REG_ALIGN_32,
+ .freq = 52000000, /* 52 MHz */
+};
+
+#define C_CAN_ID(_vend, _dev, _driverdata) { \
+ PCI_DEVICE(_vend, _dev), \
+ .driver_data = (unsigned long)&_driverdata, \
+}
+static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
+ C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
+ c_can_sta2x11),
+ {},
+};
+static struct pci_driver c_can_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = c_can_pci_tbl,
+ .probe = c_can_pci_probe,
+ .remove = __devexit_p(c_can_pci_remove),
+};
+
+module_pci_driver(c_can_pci_driver);
+
+MODULE_AUTHOR("Federico Vaga <federico.vaga@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PCI CAN bus driver for Bosch C_CAN/D_CAN controller");
+MODULE_DEVICE_TABLE(pci, c_can_pci_tbl);
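The PCI glue above and the platform glue below share one register-access scheme: the core driver names registers by enum reg index, each silicon variant supplies a reg_map_* offset table, and the read_reg/write_reg callbacks scale that offset by the bus alignment before touching the hardware. The user-space sketch below is illustrative only (the demo_* names are hypothetical stand-ins, not driver symbols) and shows how an index resolves to a byte offset for 16-bit and 32-bit aligned layouts.

/*
 * Illustrative sketch, not driver code: resolves an enum register index to a
 * byte offset through a per-variant map, doubling the offset for layouts that
 * align 16-bit registers on 32-bit boundaries.
 */
#include <stdio.h>
#include <stdint.h>

enum demo_reg { DEMO_CTRL_REG = 0, DEMO_STS_REG, DEMO_BTR_REG, DEMO_NR_REGS };

static const uint16_t demo_map[DEMO_NR_REGS] = {
        [DEMO_CTRL_REG] = 0x00,
        [DEMO_STS_REG]  = 0x02,
        [DEMO_BTR_REG]  = 0x06,
};

/* scale is 1 for 16-bit aligned layouts, 2 for 32-bit aligned layouts */
static unsigned int demo_offset(enum demo_reg idx, unsigned int scale)
{
        return scale * demo_map[idx];
}

int main(void)
{
        printf("STS, 16-bit aligned: base + 0x%02x\n", demo_offset(DEMO_STS_REG, 1));
        printf("STS, 32-bit aligned: base + 0x%02x\n", demo_offset(DEMO_STS_REG, 2));
        return 0;
}

The same arithmetic is what the *_aligned_to_16bit and *_aligned_to_32bit accessors perform before calling readw()/writew() on priv->base.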
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 5e1a5ff..f0921d1 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -42,27 +42,27 @@
* Handle the same by providing a common read/write interface.
*/
static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
- void *reg)
+ enum reg index)
{
- return readw(reg);
+ return readw(priv->base + priv->regs[index]);
}
static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
- void *reg, u16 val)
+ enum reg index, u16 val)
{
- writew(val, reg);
+ writew(val, priv->base + priv->regs[index]);
}
static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
- void *reg)
+ enum reg index)
{
- return readw(reg + (long)reg - (long)priv->regs);
+ return readw(priv->base + 2 * priv->regs[index]);
}
static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
- void *reg, u16 val)
+ enum reg index, u16 val)
{
- writew(val, reg + (long)reg - (long)priv->regs);
+ writew(val, priv->base + 2 * priv->regs[index]);
}
static int __devinit c_can_plat_probe(struct platform_device *pdev)
@@ -71,6 +71,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
void __iomem *addr;
struct net_device *dev;
struct c_can_priv *priv;
+ const struct platform_device_id *id;
struct resource *mem;
int irq;
#ifdef CONFIG_HAVE_CLK
@@ -115,26 +116,40 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
}
priv = netdev_priv(dev);
+ id = platform_get_device_id(pdev);
+ switch (id->driver_data) {
+ case C_CAN_DEVTYPE:
+ priv->regs = reg_map_c_can;
+ switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ default:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ break;
+ }
+ break;
+ case D_CAN_DEVTYPE:
+ priv->regs = reg_map_d_can;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ break;
+ default:
+ ret = -EINVAL;
+ goto exit_free_device;
+ }
dev->irq = irq;
- priv->regs = addr;
+ priv->base = addr;
#ifdef CONFIG_HAVE_CLK
priv->can.clock.freq = clk_get_rate(clk);
priv->priv = clk;
#endif
- switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
- case IORESOURCE_MEM_32BIT:
- priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
- priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
- break;
- case IORESOURCE_MEM_16BIT:
- default:
- priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
- priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
- break;
- }
-
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -146,7 +161,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
- KBUILD_MODNAME, priv->regs, dev->irq);
+ KBUILD_MODNAME, priv->base, dev->irq);
return 0;
exit_free_device:
@@ -176,7 +191,7 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
- iounmap(priv->regs);
+ iounmap(priv->base);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
@@ -188,6 +203,20 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id c_can_id_table[] = {
+ {
+ .name = KBUILD_MODNAME,
+ .driver_data = C_CAN_DEVTYPE,
+ }, {
+ .name = "c_can",
+ .driver_data = C_CAN_DEVTYPE,
+ }, {
+ .name = "d_can",
+ .driver_data = D_CAN_DEVTYPE,
+ }, {
+ }
+};
+
static struct platform_driver c_can_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
@@ -195,6 +224,7 @@ static struct platform_driver c_can_plat_driver = {
},
.probe = c_can_plat_probe,
.remove = __devexit_p(c_can_plat_remove),
+ .id_table = c_can_id_table,
};
module_platform_driver(c_can_plat_driver);
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d42a6a7..a138db1 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -695,7 +695,7 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
netif_wake_queue(dev);
}
-irqreturn_t cc770_interrupt(int irq, void *dev_id)
+static irqreturn_t cc770_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct cc770_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 53115ee..688371c 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
struct cc770_platform_data *pdata = pdev->dev.platform_data;
priv->can.clock.freq = pdata->osc_freq;
- if (priv->cpu_interface | CPUIF_DSC)
+ if (priv->cpu_interface & CPUIF_DSC)
priv->can.clock.freq /= 2;
priv->clkout = pdata->cor;
priv->bus_config = pdata->bcr;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f03d7a48..963e2cc 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -33,6 +33,39 @@ MODULE_DESCRIPTION(MOD_DESC);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+/* CAN DLC to real data length conversion helpers */
+
+static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 12, 16, 20, 24, 32, 48, 64};
+
+/* get data length from can_dlc with sanitized can_dlc */
+u8 can_dlc2len(u8 can_dlc)
+{
+ return dlc2len[can_dlc & 0x0F];
+}
+EXPORT_SYMBOL_GPL(can_dlc2len);
+
+static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
+ 9, 9, 9, 9, /* 9 - 12 */
+ 10, 10, 10, 10, /* 13 - 16 */
+ 11, 11, 11, 11, /* 17 - 20 */
+ 12, 12, 12, 12, /* 21 - 24 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
+ 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_len2dlc(u8 len)
+{
+ if (unlikely(len > 64))
+ return 0xF;
+
+ return len2dlc[len];
+}
+EXPORT_SYMBOL_GPL(can_len2dlc);
+
#ifdef CONFIG_CAN_CALC_BITTIMING
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
@@ -368,7 +401,7 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
/*
* CAN device restart for bus-off recovery
*/
-void can_restart(unsigned long data)
+static void can_restart(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct can_priv *priv = netdev_priv(dev);
@@ -454,7 +487,7 @@ EXPORT_SYMBOL_GPL(can_bus_off);
static void can_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = sizeof(struct can_frame);
+ dev->mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
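The can_dlc2len()/can_len2dlc() helpers added above map between CAN FD data length codes and payload lengths, rounding an arbitrary length up to the next valid frame size. The stand-alone sketch below (user-space only, reusing the same tables; not part of the driver) prints that round trip, for example a 13-byte payload maps to DLC 10 and pads out to 16 bytes.

/*
 * Illustrative sketch only: reproduces the DLC <-> length tables from dev.c
 * to show how payload lengths round up to the nearest valid CAN FD size.
 */
#include <stdio.h>
#include <stdint.h>

static const uint8_t dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
                                  8, 12, 16, 20, 24, 32, 48, 64};

static const uint8_t len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8,       /* 0 - 8 */
                                  9, 9, 9, 9,                       /* 9 - 12 */
                                  10, 10, 10, 10,                   /* 13 - 16 */
                                  11, 11, 11, 11,                   /* 17 - 20 */
                                  12, 12, 12, 12,                   /* 21 - 24 */
                                  13, 13, 13, 13, 13, 13, 13, 13,   /* 25 - 32 */
                                  14, 14, 14, 14, 14, 14, 14, 14,   /* 33 - 40 */
                                  14, 14, 14, 14, 14, 14, 14, 14,   /* 41 - 48 */
                                  15, 15, 15, 15, 15, 15, 15, 15,   /* 49 - 56 */
                                  15, 15, 15, 15, 15, 15, 15, 15};  /* 57 - 64 */

int main(void)
{
        unsigned int len;

        for (len = 0; len <= 64; len++) {
                uint8_t dlc = len2dlc[len];

                printf("len %2u -> dlc %2u -> padded len %2u\n",
                       len, dlc, dlc2len[dlc & 0x0F]);
        }
        return 0;
}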
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 38c0690..81324a1 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -34,6 +34,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
@@ -165,10 +166,21 @@ struct flexcan_regs {
u32 imask1; /* 0x28 */
u32 iflag2; /* 0x2c */
u32 iflag1; /* 0x30 */
- u32 _reserved2[19];
+ u32 crl2; /* 0x34 */
+ u32 esr2; /* 0x38 */
+ u32 imeur; /* 0x3c */
+ u32 lrfr; /* 0x40 */
+ u32 crcr; /* 0x44 */
+ u32 rxfgmask; /* 0x48 */
+ u32 rxfir; /* 0x4c */
+ u32 _reserved3[12];
struct flexcan_mb cantxfg[64];
};
+struct flexcan_devtype_data {
+ u32 hw_ver; /* hardware controller version */
+};
+
struct flexcan_priv {
struct can_priv can;
struct net_device *dev;
@@ -180,6 +192,15 @@ struct flexcan_priv {
struct clk *clk;
struct flexcan_platform_data *pdata;
+ struct flexcan_devtype_data *devtype_data;
+};
+
+static struct flexcan_devtype_data fsl_p1010_devtype_data = {
+ .hw_ver = 3,
+};
+
+static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+ .hw_ver = 10,
};
static struct can_bittiming_const flexcan_bittiming_const = {
@@ -750,6 +771,9 @@ static int flexcan_chip_start(struct net_device *dev)
flexcan_write(0x0, &regs->rx14mask);
flexcan_write(0x0, &regs->rx15mask);
+ if (priv->devtype_data->hw_ver >= 10)
+ flexcan_write(0x0, &regs->rxfgmask);
+
flexcan_transceiver_switch(priv, 1);
/* synchronize with the can bus */
@@ -922,8 +946,21 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
unregister_candev(dev);
}
+static const struct of_device_id flexcan_of_match[] = {
+ { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+ { /* sentinel */ },
+};
+
+static const struct platform_device_id flexcan_id_table[] = {
+ { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
+ { /* sentinel */ },
+};
+
static int __devinit flexcan_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id;
+ struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
struct resource *mem;
@@ -938,14 +975,9 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
if (IS_ERR(pinctrl))
return PTR_ERR(pinctrl);
- if (pdev->dev.of_node) {
- const u32 *clock_freq_p;
-
- clock_freq_p = of_get_property(pdev->dev.of_node,
- "clock-frequency", NULL);
- if (clock_freq_p)
- clock_freq = *clock_freq_p;
- }
+ if (pdev->dev.of_node)
+ of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &clock_freq);
if (!clock_freq) {
clk = clk_get(&pdev->dev, NULL);
@@ -982,6 +1014,17 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
goto failed_alloc;
}
+ of_id = of_match_device(flexcan_of_match, &pdev->dev);
+ if (of_id) {
+ devtype_data = of_id->data;
+ } else if (pdev->id_entry->driver_data) {
+ devtype_data = (struct flexcan_devtype_data *)
+ pdev->id_entry->driver_data;
+ } else {
+ err = -ENODEV;
+ goto failed_devtype;
+ }
+
dev->netdev_ops = &flexcan_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -998,6 +1041,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
priv->dev = dev;
priv->clk = clk;
priv->pdata = pdev->dev.platform_data;
+ priv->devtype_data = devtype_data;
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
@@ -1016,6 +1060,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
return 0;
failed_register:
+ failed_devtype:
free_candev(dev);
failed_alloc:
iounmap(base);
@@ -1049,12 +1094,41 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id flexcan_of_match[] = {
- {
- .compatible = "fsl,p1010-flexcan",
- },
- {},
-};
+#ifdef CONFIG_PM
+static int flexcan_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ flexcan_chip_disable(priv);
+
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+ }
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+
+static int flexcan_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ if (netif_running(dev)) {
+ netif_device_attach(dev);
+ netif_start_queue(dev);
+ }
+ flexcan_chip_enable(priv);
+
+ return 0;
+}
+#else
+#define flexcan_suspend NULL
+#define flexcan_resume NULL
+#endif
static struct platform_driver flexcan_driver = {
.driver = {
@@ -1064,6 +1138,9 @@ static struct platform_driver flexcan_driver = {
},
.probe = flexcan_probe,
.remove = __devexit_p(flexcan_remove),
+ .suspend = flexcan_suspend,
+ .resume = flexcan_resume,
+ .id_table = flexcan_id_table,
};
module_platform_driver(flexcan_driver);
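flexcan_probe() now takes its devtype_data from one of two sources: the OF match entry when the device comes from the device tree, or the platform_device_id driver_data for legacy platform devices, and it bails out with -ENODEV when neither is present. The minimal sketch below mimics that fallback order using simplified mock structures (the demo_* names are hypothetical, not kernel types).

/*
 * Illustrative sketch only: device-tree match wins, then the platform id
 * entry, otherwise the caller treats the device as unsupported.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_devtype_data { unsigned int hw_ver; };

static const struct demo_devtype_data demo_p1010 = { .hw_ver = 3 };
static const struct demo_devtype_data demo_imx6q = { .hw_ver = 10 };

struct demo_of_id   { const char *compatible; const struct demo_devtype_data *data; };
struct demo_pdev_id { const char *name;       const struct demo_devtype_data *data; };

static const struct demo_devtype_data *
demo_pick(const struct demo_of_id *of_match, const struct demo_pdev_id *id_entry)
{
        if (of_match)                   /* probed via device tree */
                return of_match->data;
        if (id_entry && id_entry->data) /* legacy platform device */
                return id_entry->data;
        return NULL;                    /* caller returns -ENODEV */
}

int main(void)
{
        struct demo_of_id of_id = { "fsl,imx6q-flexcan", &demo_imx6q };
        struct demo_pdev_id pdev_id = { "flexcan", &demo_p1010 };
        const struct demo_devtype_data *d;

        d = demo_pick(&of_id, NULL);
        printf("DT probe:       hw_ver=%u\n", d->hw_ver);
        d = demo_pick(NULL, &pdev_id);
        printf("platform probe: hw_ver=%u\n", d->hw_ver);
        return 0;
}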
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 346785c..9120a36 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -1020,8 +1020,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
GFP_DMA);
if (priv->spi_tx_buf) {
- priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf +
- (PAGE_SIZE / 2));
+ priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
(PAGE_SIZE / 2));
} else {
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index ea2d942..4f93c0b 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -70,13 +70,12 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cfd->len;
- skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -86,7 +85,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
int loop;
@@ -94,7 +93,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
stats->tx_packets++;
- stats->tx_bytes += cf->can_dlc;
+ stats->tx_bytes += cfd->len;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -108,7 +107,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cfd->len;
}
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -133,14 +132,28 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int vcan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
static const struct net_device_ops vcan_netdev_ops = {
.ndo_start_xmit = vcan_tx,
+ .ndo_change_mtu = vcan_change_mtu,
};
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = sizeof(struct can_frame);
+ dev->mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 9c755db..f0c8bd5 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1008,7 +1008,7 @@ e100_send_mdio_bit(unsigned char bit)
}
static unsigned char
-e100_receive_mdio_bit()
+e100_receive_mdio_bit(void)
{
unsigned char bit;
*R_NETWORK_MGM_CTRL = 0;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 442d91a..9d6a067 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -40,18 +40,6 @@
static int numdummies = 1;
-static int dummy_set_address(struct net_device *dev, void *p)
-{
- struct sockaddr *sa = p;
-
- if (!is_valid_ether_addr(sa->sa_data))
- return -EADDRNOTAVAIL;
-
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
- return 0;
-}
-
/* fake multicast ability */
static void set_multicast_list(struct net_device *dev)
{
@@ -118,7 +106,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_set_mac_address = dummy_set_address,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
};
@@ -134,6 +122,7 @@ static void dummy_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
eth_hw_addr_random(dev);
@@ -187,8 +176,10 @@ static int __init dummy_init_module(void)
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
- for (i = 0; i < numdummies && !err; i++)
+ for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
+ cond_resched();
+ }
if (err < 0)
__rtnl_link_unregister(&dummy_link_ops);
rtnl_unlock();
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
index bf73e1a..2038eaa 100644
--- a/drivers/net/ethernet/3com/3c501.c
+++ b/drivers/net/ethernet/3com/3c501.c
@@ -143,7 +143,7 @@ static int irq = 5;
static int mem_start;
/**
- * el1_probe: - probe for a 3c501
+ * el1_probe - probe for a 3c501
* @dev: The device structure passed in to probe.
*
* This can be called from two places. The network layer will probe using
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2e53867..e1219e0 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -162,6 +162,20 @@ config MAC8390
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
+config MCF8390
+ tristate "ColdFire NS8390 based Ethernet support"
+ depends on COLDFIRE
+ select CRC32
+ ---help---
+ This driver is for Ethernet devices using an NS8390-compatible
+ chipset on many common ColdFire CPU based boards. Many of the older
+ Freescale dev boards use this, and some other common boards like
+ some SnapGear routers do as well.
+
+ If you have one of these boards and want to use the network interface
+ on them, then choose Y. To compile this driver as a module, choose M
+ here: the module will be called mcf8390.
+
config NE2000
tristate "NE2000/NE1000 support"
depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX)
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index d13790b..f43038b 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
obj-$(CONFIG_HPLAN) += hp.o 8390p.o
obj-$(CONFIG_HYDRA) += hydra.o 8390.o
obj-$(CONFIG_LNE390) += lne390.o 8390.o
+obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 9239592..912ed7a 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -454,7 +454,7 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
buf[count-1] = inb(NE_BASE + NE_DATAPORT);
}
} else {
- ptrc = (char*)buf;
+ ptrc = buf;
for (cnt = 0; cnt < count; cnt++)
*ptrc++ = inb(NE_BASE + NE_DATAPORT);
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
new file mode 100644
index 0000000..230efd6
--- /dev/null
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -0,0 +1,480 @@
+/*
+ * Support for ColdFire CPU based boards using an NS8390 Ethernet device.
+ *
+ * Derived from the many other 8390 drivers.
+ *
+ * (C) Copyright 2012, Greg Ungerer <gerg@uclinux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <asm/mcf8390.h>
+
+static const char version[] =
+ "mcf8390.c: (15-06-2012) Greg Ungerer <gerg@uclinux.org>";
+
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset */
+#define NE_RESET 0x1f /* Issue a read to reset, a write to clear */
+#define NE_EN0_ISR 0x07
+#define NE_EN0_DCFG 0x0e
+#define NE_EN0_RSARLO 0x08
+#define NE_EN0_RSARHI 0x09
+#define NE_EN0_RCNTLO 0x0a
+#define NE_EN0_RXCR 0x0c
+#define NE_EN0_TXCR 0x0d
+#define NE_EN0_RCNTHI 0x0b
+#define NE_EN0_IMR 0x0f
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#ifdef NE2000_ODDOFFSET
+/*
+ * A lot of the ColdFire boards use a separate address region for odd offset
+ * register addresses. The following functions convert and map as required.
+ * Note that the data port accesses are treated a little differently, and
+ * always accessed via the insX/outsX functions.
+ */
+static inline u32 NE_PTR(u32 addr)
+{
+ if (addr & 1)
+ return addr - 1 + NE2000_ODDOFFSET;
+ return addr;
+}
+
+static inline u32 NE_DATA_PTR(u32 addr)
+{
+ return addr;
+}
+
+void ei_outb(u32 val, u32 addr)
+{
+ NE2000_BYTE *rp;
+
+ rp = (NE2000_BYTE *) NE_PTR(addr);
+ *rp = RSWAP(val);
+}
+
+#define ei_inb ei_inb
+u8 ei_inb(u32 addr)
+{
+ NE2000_BYTE *rp, val;
+
+ rp = (NE2000_BYTE *) NE_PTR(addr);
+ val = *rp;
+ return (u8) (RSWAP(val) & 0xff);
+}
+
+void ei_insb(u32 addr, void *vbuf, int len)
+{
+ NE2000_BYTE *rp, val;
+ u8 *buf;
+
+ buf = (u8 *) vbuf;
+ rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ val = *rp;
+ *buf++ = RSWAP(val);
+ }
+}
+
+void ei_insw(u32 addr, void *vbuf, int len)
+{
+ volatile u16 *rp;
+ u16 w, *buf;
+
+ buf = (u16 *) vbuf;
+ rp = (volatile u16 *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ w = *rp;
+ *buf++ = BSWAP(w);
+ }
+}
+
+void ei_outsb(u32 addr, const void *vbuf, int len)
+{
+ NE2000_BYTE *rp, val;
+ u8 *buf;
+
+ buf = (u8 *) vbuf;
+ rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ val = *buf++;
+ *rp = RSWAP(val);
+ }
+}
+
+void ei_outsw(u32 addr, const void *vbuf, int len)
+{
+ volatile u16 *rp;
+ u16 w, *buf;
+
+ buf = (u16 *) vbuf;
+ rp = (volatile u16 *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ w = *buf++;
+ *rp = BSWAP(w);
+ }
+}
+
+#else /* !NE2000_ODDOFFSET */
+
+#define ei_inb inb
+#define ei_outb outb
+#define ei_insb insb
+#define ei_insw insw
+#define ei_outsb outsb
+#define ei_outsw outsw
+
+#endif /* !NE2000_ODDOFFSET */
+
+#define ei_inb_p ei_inb
+#define ei_outb_p ei_outb
+
+#include "lib8390.c"
+
+/*
+ * Hard reset the card. This used to pause for the same period that a
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
+static void mcf8390_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+ u32 addr = dev->base_addr;
+
+ if (ei_debug > 1)
+ netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
+
+ ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RESET) == 0) {
+ if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
+ netdev_warn(dev, "%s: did not complete\n", __func__);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RESET, addr + NE_EN0_ISR);
+}
+
+/*
+ * This *shouldn't* happen.
+ * If it does, it's the last thing you'll see
+ */
+static void mcf8390_dmaing_err(const char *func, struct net_device *dev,
+ struct ei_device *ei_local)
+{
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ func, ei_local->dmaing, ei_local->irqlock);
+}
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
+static void mcf8390_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+ ei_outb(sizeof(struct e8390_pkt_hdr), addr + NE_EN0_RCNTLO);
+ ei_outb(0, addr + NE_EN0_RCNTHI);
+ ei_outb(0, addr + NE_EN0_RSARLO); /* On page boundary */
+ ei_outb(ring_page, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
+
+ ei_insw(addr + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr) >> 1);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+
+ hdr->count = cpu_to_le16(hdr->count);
+}
+
+/*
+ * Block input and output, similar to the Crynwr packet driver.
+ * If you are porting to a new ethercard, look at the packet driver source
+ * for hints. The NEx000 doesn't share the on-board packet memory --
+ * you have to put the packet out through the "remote DMA" dataport
+ * using z_writeb.
+ */
+static void mcf8390_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+ char *buf = skb->data;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+ ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
+ ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
+ ei_outb(ring_offset & 0xff, addr + NE_EN0_RSARLO);
+ ei_outb(ring_offset >> 8, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
+
+ ei_insw(addr + NE_DATAPORT, buf, count >> 1);
+ if (count & 1)
+ buf[count - 1] = ei_inb(addr + NE_DATAPORT);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+}
+
+static void mcf8390_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+ unsigned long dma_start;
+
+ /* Make sure we transfer all bytes when doing 16-bit I/O writes */
+ if (count & 0x1)
+ count++;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, addr + NE_CMD);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+
+ /* Now the normal output. */
+ ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
+ ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
+ ei_outb(0x00, addr + NE_EN0_RSARLO);
+ ei_outb(start_page, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RWRITE + E8390_START, addr + NE_CMD);
+
+ ei_outsw(addr + NE_DATAPORT, buf, count >> 1);
+
+ dma_start = jiffies;
+ while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
+ if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
+ netdev_err(dev, "timeout waiting for Tx RDC\n");
+ mcf8390_reset_8390(dev);
+ __NS8390_init(dev, 1);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+}
+
+static const struct net_device_ops mcf8390_netdev_ops = {
+ .ndo_open = __ei_open,
+ .ndo_stop = __ei_close,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = __ei_poll,
+#endif
+};
+
+static int mcf8390_init(struct net_device *dev)
+{
+ static u32 offsets[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ };
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned char SA_prom[32];
+ u32 addr = dev->base_addr;
+ int start_page, stop_page;
+ int i, ret;
+
+ mcf8390_reset_8390(dev);
+
+ /*
+ * Read the 16 bytes of station address PROM.
+ * We must first initialize registers,
+ * similar to NS8390_init(eifdev, 0).
+ * We can't reliably read the SAPROM address without this.
+ * (I learned the hard way!).
+ */
+ {
+ static const struct {
+ u32 value;
+ u32 offset;
+ } program_seq[] = {
+ {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD},
+ /* Select page 0 */
+ {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */
+ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_IMR}, /* Mask completion irq */
+ {0xFF, NE_EN0_ISR},
+ {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */
+ {32, NE_EN0_RCNTLO},
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */
+ {0x00, NE_EN0_RSARHI},
+ {E8390_RREAD + E8390_START, NE_CMD},
+ };
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++) {
+ ei_outb(program_seq[i].value,
+ addr + program_seq[i].offset);
+ }
+ }
+
+ for (i = 0; i < 16; i++) {
+ SA_prom[i] = ei_inb(addr + NE_DATAPORT);
+ ei_inb(addr + NE_DATAPORT);
+ }
+
+ /* We must set the 8390 for word mode. */
+ ei_outb(0x49, addr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ /* Install the Interrupt handler */
+ ret = request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = SA_prom[i];
+
+ netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr);
+
+ ei_local->name = "mcf8390";
+ ei_local->tx_start_page = start_page;
+ ei_local->stop_page = stop_page;
+ ei_local->word16 = 1;
+ ei_local->rx_start_page = start_page + TX_PAGES;
+ ei_local->reset_8390 = mcf8390_reset_8390;
+ ei_local->block_input = mcf8390_block_input;
+ ei_local->block_output = mcf8390_block_output;
+ ei_local->get_8390_hdr = mcf8390_get_8390_hdr;
+ ei_local->reg_offset = offsets;
+
+ dev->netdev_ops = &mcf8390_netdev_ops;
+ __NS8390_init(dev, 0);
+ ret = register_netdev(dev);
+ if (ret) {
+ free_irq(dev->irq, dev);
+ return ret;
+ }
+
+ netdev_info(dev, "addr=0x%08x irq=%d, Ethernet Address %pM\n",
+ addr, dev->irq, dev->dev_addr);
+ return 0;
+}
+
+static int mcf8390_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct ei_device *ei_local;
+ struct resource *mem, *irq;
+ resource_size_t msize;
+ int ret;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq == NULL) {
+ dev_err(&pdev->dev, "no IRQ specified?\n");
+ return -ENXIO;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "no memory address specified?\n");
+ return -ENXIO;
+ }
+ msize = resource_size(mem);
+ if (!request_mem_region(mem->start, msize, pdev->name))
+ return -EBUSY;
+
+ dev = ____alloc_ei_netdev(0);
+ if (dev == NULL) {
+ release_mem_region(mem->start, msize);
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+ ei_local = netdev_priv(dev);
+
+ dev->irq = irq->start;
+ dev->base_addr = mem->start;
+
+ ret = mcf8390_init(dev);
+ if (ret) {
+ release_mem_region(mem->start, msize);
+ free_netdev(dev);
+ return ret;
+ }
+ return 0;
+}
+
+static int mcf8390_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ unregister_netdev(dev);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem)
+ release_mem_region(mem->start, resource_size(mem));
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver mcf8390_drv = {
+ .driver = {
+ .name = "mcf8390",
+ .owner = THIS_MODULE,
+ },
+ .probe = mcf8390_probe,
+ .remove = mcf8390_remove,
+};
+
+module_platform_driver(mcf8390_drv);
+
+MODULE_DESCRIPTION("MCF8390 ColdFire NS8390 driver");
+MODULE_AUTHOR("Greg Ungerer <gerg@uclinux.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mcf8390");
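On ColdFire boards that define NE2000_ODDOFFSET, odd register offsets live in a separate address window, so every byte access in mcf8390.c goes through NE_PTR() before hitting the bus. The small sketch below illustrates that fix-up; DEMO_ODDOFFSET is a made-up constant standing in for the board-specific value.

/*
 * Illustrative sketch only: even offsets pass through unchanged, odd offsets
 * are rebased into the separate odd-offset window.
 */
#include <stdio.h>

#define DEMO_ODDOFFSET 0x10000u

static unsigned int demo_ne_ptr(unsigned int addr)
{
        if (addr & 1)
                return addr - 1 + DEMO_ODDOFFSET;
        return addr;
}

int main(void)
{
        printf("reg 0x0e -> 0x%05x\n", demo_ne_ptr(0x0e)); /* even: unchanged */
        printf("reg 0x0f -> 0x%05x\n", demo_ne_ptr(0x0f)); /* odd: remapped */
        return 0;
}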
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 3485011..9c77c73 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1014,7 +1014,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
struct greth_regs *regs;
greth = netdev_priv(dev);
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -1036,7 +1036,7 @@ static void greth_set_hash_filter(struct net_device *dev)
{
struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
u32 mc_filter[2];
unsigned int bitnr;
@@ -1055,7 +1055,7 @@ static void greth_set_multicast_list(struct net_device *dev)
{
int cfg;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
cfg = GRETH_REGLOAD(regs->control);
if (dev->flags & IFF_PROMISC)
@@ -1414,7 +1414,7 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
goto error1;
}
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
greth->irq = ofdev->archdata.irqs[0];
dev_set_drvdata(greth->dev, dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 75299f5..7203b52 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -623,7 +623,7 @@ static int lance_rx(struct net_device *dev)
skb_put(skb, len); /* make room */
cp_from_buf(lp->type, skb->data,
- (char *)lp->rx_buf_ptr_cpu[entry], len);
+ lp->rx_buf_ptr_cpu[entry], len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -919,7 +919,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
- cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
+ cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
/* Now, give the packet to the lance */
*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index a6e2e84..5c72843 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -873,10 +873,9 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
lp->rx_skbuff[i] = skb;
- if (skb) {
- skb->dev = dev;
+ if (skb)
rx_buff = skb->data;
- } else
+ else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
if (rx_buff == NULL)
lp->rx_ring[i].base = 0;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index ab7ff86..a92ddee7 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -228,7 +228,7 @@ static int __devinit mace_probe(struct platform_device *pdev)
* bits are reversed.
*/
- addr = (void *)MACE_PROM;
+ addr = MACE_PROM;
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index ff9c738..801f012 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -602,7 +602,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
int atl1c_phy_init(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
@@ -696,7 +696,7 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
/* select one link mode to get lower power consumption */
int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret = 0;
u16 autoneg_advertised = ADVERTISED_10baseT_Half;
@@ -768,7 +768,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 master_ctrl, mac_ctrl, phy_ctrl;
u32 wol_ctrl, speed;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 9cc1570..36d3783 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -166,7 +166,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
msleep(5);
}
-/*
+/**
* atl1c_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -179,7 +179,7 @@ static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -192,7 +192,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/*
+/**
 * atl1c_irq_reset - reset interrupt configuration on the NIC
* @adapter: board private structure
*/
@@ -220,7 +220,7 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
return data;
}
-/*
+/**
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
if ((phy_data & BMSR_LSTATUS) == 0) {
/* link down */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
hw->hibernate = true;
if (atl1c_reset_mac(hw) != 0)
if (netif_msg_hw(adapter))
@@ -361,7 +360,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
}
-/*
+/**
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -374,7 +373,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->common_task);
}
-/*
+/**
* atl1c_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -453,7 +452,7 @@ static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-/*
+/**
* atl1c_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -518,7 +517,7 @@ static int atl1c_set_features(struct net_device *netdev,
return 0;
}
-/*
+/**
* atl1c_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -577,12 +576,6 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
atl1c_write_phy_reg(&adapter->hw, reg_num, val);
}
-/*
- * atl1c_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1c_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -633,12 +626,6 @@ out:
return retval;
}
-/*
- * atl1c_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -651,7 +638,7 @@ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atl1c_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -755,7 +742,7 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
i++;
}
}
-/*
+/**
* atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
* @adapter: board private structure to initialize
*
@@ -853,7 +840,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
}
-/*
+/**
* atl1c_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
*/
@@ -878,7 +865,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
tpd_ring->next_to_use = 0;
}
-/*
+/**
* atl1c_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
*/
@@ -931,7 +918,7 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -954,7 +941,7 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -989,12 +976,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
}
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ (tpd_ring->buffer_info + count);
count += tpd_ring[i].count;
}
rfd_ring->buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ (tpd_ring->buffer_info + count);
count += rfd_ring->count;
rx_desc_count += rfd_ring->count;
@@ -1227,7 +1214,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
*/
static int atl1c_reset_mac(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 ctrl_data = 0;
@@ -1363,7 +1350,7 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
return;
}
-/*
+/**
* atl1c_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1477,7 +1464,7 @@ static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
@@ -1531,8 +1518,7 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
- struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
- &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
@@ -1559,11 +1545,10 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
return true;
}
-/*
+/**
* atl1c_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1c_intr(int irq, void *data)
{
@@ -1814,9 +1799,8 @@ rrs_checked:
atl1c_alloc_rx_buffer(adapter);
}
-/*
+/**
* atl1c_clean - NAPI Rx polling callback
- * @adapter: board private structure
*/
static int atl1c_clean(struct napi_struct *napi, int budget)
{
@@ -2271,7 +2255,7 @@ static void atl1c_down(struct atl1c_adapter *adapter)
atl1c_reset_dma_ring(adapter);
}
-/*
+/**
* atl1c_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2310,7 +2294,7 @@ err_up:
return err;
}
-/*
+/**
* atl1c_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2433,7 +2417,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
return 0;
}
-/*
+/**
* atl1c_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1c_pci_tbl
@@ -2580,7 +2564,7 @@ err_dma:
return err;
}
-/*
+/**
* atl1c_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -2606,7 +2590,7 @@ static void __devexit atl1c_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-/*
+/**
* atl1c_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
@@ -2634,7 +2618,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-/*
+/**
* atl1c_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
@@ -2662,7 +2646,7 @@ static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/*
+/**
* atl1c_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
@@ -2705,7 +2689,7 @@ static struct pci_driver atl1c_driver = {
.driver.pm = &atl1c_pm_ops,
};
-/*
+/**
* atl1c_init_module - Driver Registration Routine
*
* atl1c_init_module is the first routine called when the driver is
@@ -2716,7 +2700,7 @@ static int __init atl1c_init_module(void)
return pci_register_driver(&atl1c_driver);
}
-/*
+/**
* atl1c_exit_module - Driver Exit Cleanup Routine
*
* atl1c_exit_module is called just before the driver is removed
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6e61f9f..82b2386 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -268,7 +268,7 @@ static int atl1e_set_eeprom(struct net_device *netdev,
if (eeprom_buff == NULL)
return -ENOMEM;
- ptr = (u32 *)eeprom_buff;
+ ptr = eeprom_buff;
if (eeprom->offset & 3) {
/* need read/modify/write of first changed EEPROM word */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1220e51..a98acc8 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -89,7 +89,7 @@ static const u16 atl1e_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
};
-/*
+/**
* atl1e_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -102,7 +102,7 @@ static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -114,7 +114,7 @@ static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/*
+/**
 * atl1e_irq_reset - reset interrupt configuration on the NIC
* @adapter: board private structure
*/
@@ -126,7 +126,7 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
AT_WRITE_FLUSH(&adapter->hw);
}
-/*
+/**
* atl1e_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -210,7 +210,7 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_link_chg_task - deal with link change event Out of interrupt context
* @netdev: network interface device structure
*/
@@ -259,7 +259,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
cancel_work_sync(&adapter->link_chg_task);
}
-/*
+/**
* atl1e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -271,7 +271,7 @@ static void atl1e_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl1e_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -345,7 +345,7 @@ static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-/*
+/**
* atl1e_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -397,7 +397,7 @@ static int atl1e_set_features(struct net_device *netdev,
return 0;
}
-/*
+/**
* atl1e_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -449,12 +449,6 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
}
-/*
- * atl1e_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1e_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -505,12 +499,6 @@ out:
}
-/*
- * atl1e_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -541,7 +529,7 @@ static void atl1e_setup_pcicmd(struct pci_dev *pdev)
msleep(1);
}
-/*
+/**
* atl1e_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -551,7 +539,7 @@ static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
* @adapter: board private structure to initialize
*
@@ -635,14 +623,13 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
*/
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
struct pci_dev *pdev = adapter->pdev;
u16 index, ring_count;
@@ -679,14 +666,14 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
ring_count);
}
-/*
+/**
* atl1e_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
*/
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
+ &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
u16 i, j;
@@ -762,7 +749,7 @@ static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -787,7 +774,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -884,14 +871,12 @@ failed:
return err;
}
-static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
+static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
- struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
- struct atl1e_tx_ring *tx_ring =
- (struct atl1e_tx_ring *)&adapter->tx_ring;
+ struct atl1e_hw *hw = &adapter->hw;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_rx_page_desc *rx_page_desc = NULL;
int i, j;
@@ -932,7 +917,7 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 dev_ctrl_data = 0;
u32 max_pay_load = 0;
u32 jumbo_thresh = 0;
@@ -975,7 +960,7 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 rxf_len = 0;
u32 rxf_low = 0;
u32 rxf_high = 0;
@@ -1078,7 +1063,7 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}
-/*
+/**
* atl1e_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1148,7 +1133,7 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
@@ -1224,8 +1209,7 @@ static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
@@ -1261,11 +1245,10 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
return true;
}
-/*
+/**
* atl1e_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1e_intr(int irq, void *data)
{
@@ -1384,15 +1367,14 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
(struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
u8 rx_using = rx_page_desc[que].rx_using;
- return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]);
+ return &(rx_page_desc[que].rx_page[rx_using]);
}
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
struct net_device *netdev = adapter->netdev;
- struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
- &adapter->rx_ring;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc =
(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
struct sk_buff *skb = NULL;
@@ -1494,9 +1476,8 @@ fatal_err:
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl1e_clean - NAPI Rx polling callback
- * @adapter: board private structure
*/
static int atl1e_clean(struct napi_struct *napi, int budget)
{
@@ -1576,7 +1557,7 @@ static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
tx_ring->next_to_use = 0;
memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
- return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
+ return &tx_ring->desc[next_to_use];
}
static struct atl1e_tx_buffer *
@@ -1961,7 +1942,7 @@ void atl1e_down(struct atl1e_adapter *adapter)
atl1e_clean_rx_ring(adapter);
}
-/*
+/**
* atl1e_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2007,7 +1988,7 @@ err_req_irq:
return err;
}
-/*
+/**
* atl1e_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2061,8 +2042,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc) {
/* get link status */
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
mii_advertise_data = ADVERTISE_10HALF;
@@ -2086,7 +2067,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
msleep(100);
atl1e_read_phy_reg(hw, MII_BMSR,
- (u16 *)&mii_bmsr_data);
+ &mii_bmsr_data);
if (mii_bmsr_data & BMSR_LSTATUS)
break;
}
@@ -2243,7 +2224,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
return 0;
}
-/*
+/**
* atl1e_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1e_pci_tbl
@@ -2397,7 +2378,7 @@ err_dma:
return err;
}
-/*
+/**
* atl1e_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -2429,7 +2410,7 @@ static void __devexit atl1e_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/*
+/**
* atl1e_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
@@ -2457,7 +2438,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return PCI_ERS_RESULT_NEED_RESET;
}
-/*
+/**
* atl1e_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
@@ -2484,7 +2465,7 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/*
+/**
* atl1e_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
@@ -2528,7 +2509,7 @@ static struct pci_driver atl1e_driver = {
.err_handler = &atl1e_err_handler
};
-/*
+/**
* atl1e_init_module - Driver Registration Routine
*
* atl1e_init_module is the first routine called when the driver is
@@ -2539,7 +2520,7 @@ static int __init atl1e_init_module(void)
return pci_register_driver(&atl1e_driver);
}
-/*
+/**
* atl1e_exit_module - Driver Exit Cleanup Routine
*
* atl1e_exit_module is called just before the driver is removed
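The "/*" to "/**" conversions in the atl1e hunks above (and the matching ones for atl1, atl2 and atlx further down) promote these function headers to kernel-doc, while the stub comments with empty "@netdev:"/"@ifreq:"/"@cmd:" lines are deleted outright rather than promoted. As a point of reference, a minimal sketch of the comment shape that scripts/kernel-doc picks up is shown below; the function name and body are hypothetical:

    #include <linux/netdevice.h>

    /**
     * example_tx_timeout - Respond to a Tx Hang
     * @netdev: network interface device structure
     *
     * The opening marker is a slash followed by two asterisks, the first
     * line names the function with a one-line summary, and every
     * parameter gets a matching "@name:" line; comments opened with a
     * single asterisk are ignored by scripts/kernel-doc.
     */
    static void example_tx_timeout(struct net_device *netdev)
    {
            /* hypothetical body: a real driver would schedule its reset work here */
    }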
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
index 0ce60b6..b5086f1 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
@@ -168,7 +168,7 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
return -1;
}
-/*
+/**
* atl1e_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5d10884..f2402f3 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -195,7 +195,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
return -1;
}
-/*
+/**
* atl1_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
@@ -937,7 +937,7 @@ static void atl1_set_mac_addr(struct atl1_hw *hw)
iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
}
-/*
+/**
* atl1_sw_init - Initialize general software structures (struct atl1_adapter)
* @adapter: board private structure to initialize
*
@@ -1014,12 +1014,6 @@ static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
atl1_write_phy_reg(&adapter->hw, reg_num, val);
}
-/*
- * atl1_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
@@ -1036,7 +1030,7 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return retval;
}
-/*
+/**
* atl1_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -1061,7 +1055,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
goto err_nomem;
}
rfd_ring->buffer_info =
- (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
+ (tpd_ring->buffer_info + tpd_ring->count);
/*
* real ring DMA buffer
@@ -1147,7 +1141,7 @@ static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
atomic_set(&rrd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_clean_rx_ring - Free RFD Buffers
* @adapter: board private structure
*/
@@ -1187,7 +1181,7 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
atomic_set(&rrd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_clean_tx_ring - Free Tx Buffers
* @adapter: board private structure
*/
@@ -1227,7 +1221,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
atomic_set(&tpd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -1470,7 +1464,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw)
iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
-/*
+/**
* atl1_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1844,7 +1838,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
}
}
-/*
+/**
* atl1_alloc_rx_buffers - Replace used receive buffers
* @adapter: address of board private structure
*/
@@ -2489,11 +2483,10 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
return 1;
}
-/*
+/**
* atl1_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1_intr(int irq, void *data)
{
@@ -2574,7 +2567,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
}
-/*
+/**
* atl1_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -2693,7 +2686,7 @@ static void atl1_reset_dev_task(struct work_struct *work)
netif_device_attach(netdev);
}
-/*
+/**
* atl1_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2727,7 +2720,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-/*
+/**
* atl1_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2762,7 +2755,7 @@ err_up:
return err;
}
-/*
+/**
* atl1_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2930,7 +2923,7 @@ static const struct net_device_ops atl1_netdev_ops = {
#endif
};
-/*
+/**
* atl1_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1_pci_tbl
@@ -3111,7 +3104,7 @@ err_request_regions:
return err;
}
-/*
+/**
* atl1_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -3158,7 +3151,7 @@ static struct pci_driver atl1_driver = {
.driver.pm = ATL1_PM_OPS,
};
-/*
+/**
* atl1_exit_module - Driver Exit Cleanup Routine
*
* atl1_exit_module is called just before the driver is removed
@@ -3169,7 +3162,7 @@ static void __exit atl1_exit_module(void)
pci_unregister_driver(&atl1_driver);
}
-/*
+/**
* atl1_init_module - Driver Registration Routine
*
* atl1_init_module is the first routine called when the driver is
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 6762dc4..7c0b7e2 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -75,7 +75,7 @@ static void atl2_set_ethtool_ops(struct net_device *netdev);
static void atl2_check_options(struct atl2_adapter *adapter);
-/*
+/**
* atl2_sw_init - Initialize general software structures (struct atl2_adapter)
* @adapter: board private structure to initialize
*
@@ -123,7 +123,7 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -177,7 +177,7 @@ static void init_ring_ptrs(struct atl2_adapter *adapter)
adapter->txs_next_clear = 0;
}
-/*
+/**
* atl2_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -283,7 +283,7 @@ static int atl2_configure(struct atl2_adapter *adapter)
return value;
}
-/*
+/**
* atl2_setup_ring_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -340,7 +340,7 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -350,7 +350,7 @@ static inline void atl2_irq_enable(struct atl2_adapter *adapter)
ATL2_WRITE_FLUSH(&adapter->hw);
}
-/*
+/**
* atl2_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -599,11 +599,10 @@ static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
spin_unlock(&adapter->stats_lock);
}
-/*
+/**
* atl2_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl2_intr(int irq, void *data)
{
@@ -679,7 +678,7 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
netdev);
}
-/*
+/**
* atl2_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -692,7 +691,7 @@ static void atl2_free_ring_resources(struct atl2_adapter *adapter)
adapter->ring_dma);
}
-/*
+/**
* atl2_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -798,7 +797,7 @@ static void atl2_free_irq(struct atl2_adapter *adapter)
#endif
}
-/*
+/**
* atl2_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -918,7 +917,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-/*
+/**
* atl2_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -943,7 +942,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-/*
+/**
* atl2_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -969,12 +968,6 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
return 0;
}
-/*
- * atl2_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
@@ -1011,12 +1004,6 @@ static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return 0;
}
-/*
- * atl2_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -1033,7 +1020,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -1045,7 +1032,7 @@ static void atl2_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl2_watchdog - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -1070,7 +1057,7 @@ static void atl2_watchdog(unsigned long data)
}
}
-/*
+/**
* atl2_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -1274,9 +1261,8 @@ static int atl2_check_link(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_link_chg_task - deal with link change event Out of interrupt context
- * @netdev: network interface device structure
*/
static void atl2_link_chg_task(struct work_struct *work)
{
@@ -1341,7 +1327,7 @@ static const struct net_device_ops atl2_netdev_ops = {
#endif
};
-/*
+/**
* atl2_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl2_pci_tbl
@@ -1501,7 +1487,7 @@ err_dma:
return err;
}
-/*
+/**
* atl2_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -1728,7 +1714,7 @@ static struct pci_driver atl2_driver = {
.shutdown = atl2_shutdown,
};
-/*
+/**
* atl2_init_module - Driver Registration Routine
*
* atl2_init_module is the first routine called when the driver is
@@ -1743,7 +1729,7 @@ static int __init atl2_init_module(void)
}
module_init(atl2_init_module);
-/*
+/**
* atl2_exit_module - Driver Exit Cleanup Routine
*
* atl2_exit_module is called just before the driver is removed
@@ -2997,7 +2983,7 @@ static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
return -1;
}
-/*
+/**
* atl2_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index b4f3aa4..77ffbc4 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -64,7 +64,7 @@ static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atlx_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -115,7 +115,7 @@ static void atlx_check_for_link(struct atlx_adapter *adapter)
schedule_work(&adapter->link_chg_task);
}
-/*
+/**
* atlx_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -162,7 +162,7 @@ static inline void atlx_imr_set(struct atlx_adapter *adapter,
ioread32(adapter->hw.hw_addr + REG_IMR);
}
-/*
+/**
* atlx_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -172,7 +172,7 @@ static void atlx_irq_enable(struct atlx_adapter *adapter)
adapter->int_enabled = true;
}
-/*
+/**
* atlx_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -193,7 +193,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-/*
+/**
* atlx_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 46b8b7d..d09c6b5 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dma_unmap_single(bp->sdev->dma_dev, mapping,
RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
- skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+ skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
if (skb == NULL)
return -ENOMEM;
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_unmap_single(bp->sdev->dma_dev, mapping, len,
DMA_TO_DEVICE);
- bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb)
goto err_out;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index ac7b744..0ced154 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
@@ -57,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.1"
-#define DRV_MODULE_RELDATE "Dec 18, 2011"
+#define DRV_MODULE_VERSION "2.2.3"
+#define DRV_MODULE_RELDATE "June 27, 2012"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -872,8 +873,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
bnapi = &bp->bnx2_napi[i];
- sblk = (void *) (status_blk +
- BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
bnapi->status_blk.msix = sblk;
bnapi->hw_tx_cons_ptr =
&sblk->status_tx_quick_consumer_index;
@@ -1972,22 +1972,26 @@ bnx2_remote_phy_event(struct bnx2 *bp)
switch (speed) {
case BNX2_LINK_STATUS_10HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_10FULL:
bp->line_speed = SPEED_10;
break;
case BNX2_LINK_STATUS_100HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_100BASE_T4:
case BNX2_LINK_STATUS_100FULL:
bp->line_speed = SPEED_100;
break;
case BNX2_LINK_STATUS_1000HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_1000FULL:
bp->line_speed = SPEED_1000;
break;
case BNX2_LINK_STATUS_2500HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_2500FULL:
bp->line_speed = SPEED_2500;
break;
@@ -2473,6 +2477,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp)
bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
pr_cont(" condition[%08x]\n",
bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
+ DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
DP_SHMEM_LINE(bp, 0x3cc);
DP_SHMEM_LINE(bp, 0x3dc);
DP_SHMEM_LINE(bp, 0x3ec);
@@ -5372,7 +5377,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
int k, last;
if (skb == NULL) {
- j++;
+ j = NEXT_TX_BD(j);
continue;
}
@@ -5384,8 +5389,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf->skb = NULL;
last = tx_buf->nr_frags;
- j++;
- for (k = 0; k < last; k++, j++) {
+ j = NEXT_TX_BD(j);
+ for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
@@ -6245,7 +6250,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
- int cpus = num_online_cpus();
+ int cpus = netif_get_num_default_rss_queues();
int msix_vecs;
if (!bp->num_req_rx_rings)
@@ -6406,6 +6411,75 @@ bnx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
+#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
+
+static void
+bnx2_dump_ftq(struct bnx2 *bp)
+{
+ int i;
+ u32 reg, bdidx, cid, valid;
+ struct net_device *dev = bp->dev;
+ static const struct ftq_reg {
+ char *name;
+ u32 off;
+ } ftq_arr[] = {
+ BNX2_FTQ_ENTRY(RV2P_P),
+ BNX2_FTQ_ENTRY(RV2P_T),
+ BNX2_FTQ_ENTRY(RV2P_M),
+ BNX2_FTQ_ENTRY(TBDR_),
+ BNX2_FTQ_ENTRY(TDMA_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TPAT_),
+ BNX2_FTQ_ENTRY(RXP_C),
+ BNX2_FTQ_ENTRY(RXP_),
+ BNX2_FTQ_ENTRY(COM_COMXQ_),
+ BNX2_FTQ_ENTRY(COM_COMTQ_),
+ BNX2_FTQ_ENTRY(COM_COMQ_),
+ BNX2_FTQ_ENTRY(CP_CPQ_),
+ };
+
+ netdev_err(dev, "<--- start FTQ dump --->\n");
+ for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
+ netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
+ bnx2_reg_rd_ind(bp, ftq_arr[i].off));
+
+ netdev_err(dev, "CPU states:\n");
+ for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
+ netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
+ reg, bnx2_reg_rd_ind(bp, reg),
+ bnx2_reg_rd_ind(bp, reg + 4),
+ bnx2_reg_rd_ind(bp, reg + 8),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x20));
+
+ netdev_err(dev, "<--- end FTQ dump --->\n");
+ netdev_err(dev, "<--- start TBDC dump --->\n");
+ netdev_err(dev, "TBDC free cnt: %ld\n",
+ REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
+ netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
+ for (i = 0; i < 0x20; i++) {
+ int j = 0;
+
+ REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
+ REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
+ BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
+ REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
+ while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
+ BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
+ j++;
+
+ cid = REG_RD(bp, BNX2_TBDC_CID);
+ bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
+ valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
+ netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
+ i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
+ bdidx >> 24, (valid >> 8) & 0x0ff);
+ }
+ netdev_err(dev, "<--- end TBDC dump --->\n");
+}
+
static void
bnx2_dump_state(struct bnx2 *bp)
{
@@ -6435,6 +6509,7 @@ bnx2_tx_timeout(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
+ bnx2_dump_ftq(bp);
bnx2_dump_state(bp);
bnx2_dump_mcp_state(bp);
@@ -6628,6 +6703,7 @@ bnx2_close(struct net_device *dev)
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
+ netif_tx_disable(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_irq(bp);
@@ -7832,7 +7908,7 @@ bnx2_get_5709_media(struct bnx2 *bp)
else
strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
- if (PCI_FUNC(bp->pdev->devfn) == 0) {
+ if (bp->func == 0) {
switch (strap) {
case 0x4:
case 0x5:
@@ -8131,9 +8207,12 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
+ if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
+ bp->func = 1;
+
if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
BNX2_SHM_HDR_SIGNATURE_SIG) {
- u32 off = PCI_FUNC(pdev->devfn) << 2;
+ u32 off = bp->func << 2;
bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
} else
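In the bnx2_free_tx_skbs() hunk above, the ring index now advances with NEXT_TX_BD(j) instead of a bare j++. The apparent reason, stated here as an assumption rather than taken from the patch itself, is that the tx ring reserves the last descriptor of each page for a next-page chain BD, so a raw increment can point at a slot that never holds an skb fragment and lets the loop drift out of step with the hardware consumer index. A helper of roughly this shape (illustrative names and constants, not copied from bnx2.h) captures the idea:

    /* Illustrative ring-index helper: skip the last slot of every
     * descriptor page, assumed to carry a chain BD rather than a
     * packet BD.  DESCS_PER_PAGE is a made-up placeholder value.
     */
    #define DESCS_PER_PAGE   256
    #define LAST_IN_PAGE(x)  (((x) & (DESCS_PER_PAGE - 1)) == (DESCS_PER_PAGE - 1))
    #define NEXT_RING_IDX(x) (LAST_IN_PAGE(x) ? (x) + 2 : (x) + 1)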
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index dc06bda..af6451d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -4642,6 +4642,47 @@ struct l2_fhdr {
#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+/*
+ * tbdc definition
+ * offset: 0x5400
+ */
+#define BNX2_TBDC_COMMAND 0x5400
+#define BNX2_TBDC_COMMAND_CMD_ENABLED (1UL<<0)
+#define BNX2_TBDC_COMMAND_CMD_FLUSH (1UL<<1)
+#define BNX2_TBDC_COMMAND_CMD_SOFT_RST (1UL<<2)
+#define BNX2_TBDC_COMMAND_CMD_REG_ARB (1UL<<3)
+#define BNX2_TBDC_COMMAND_WRCHK_RANGE_ERROR (1UL<<4)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ONES_ERROR (1UL<<5)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ZEROS_ERROR (1UL<<6)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ONES_ERROR (1UL<<7)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ZEROS_ERROR (1UL<<8)
+
+#define BNX2_TBDC_STATUS 0x5404
+#define BNX2_TBDC_STATUS_FREE_CNT (0x3fUL<<0)
+
+#define BNX2_TBDC_BD_ADDR 0x5424
+
+#define BNX2_TBDC_BIDX 0x542c
+#define BNX2_TBDC_BDIDX_BDIDX (0xffffUL<<0)
+#define BNX2_TBDC_BDIDX_CMD (0xffUL<<24)
+
+#define BNX2_TBDC_CID 0x5430
+
+#define BNX2_TBDC_CAM_OPCODE 0x5434
+#define BNX2_TBDC_CAM_OPCODE_OPCODE (0x7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_SEARCH (0UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CACHE_WRITE (1UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_INVALIDATE (2UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_WRITE (4UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ (5UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_WRITE (6UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_READ (7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_BDIDX (1UL<<4)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CID (1UL<<5)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CMD (1UL<<6)
+#define BNX2_TBDC_CAM_OPCODE_WMT_FAILED (1UL<<7)
+#define BNX2_TBDC_CAM_OPCODE_CAM_VALIDS (0xffUL<<8)
+
/*
* tdma_reg definition
@@ -6930,6 +6971,8 @@ struct bnx2 {
struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC];
int irq_nvecs;
+ u8 func;
+
u8 num_tx_rings;
u8 num_rx_rings;
@@ -7314,6 +7357,8 @@ struct bnx2_rv2p_fw_file {
#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
(msg))
+#define BNX2_BC_RESET_TYPE 0x000001c0
+
#define BNX2_BC_STATE 0x000001c4
#define BNX2_BC_STATE_ERR_MASK 0x0000ff00
#define BNX2_BC_STATE_SIGN 0x42530000
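The TBDC register block added above is what the new bnx2_dump_ftq() walks on a tx timeout: it writes a line number to BNX2_TBDC_BD_ADDR, triggers a CAM read through BNX2_TBDC_CAM_OPCODE and BNX2_TBDC_COMMAND, then prints CID, BD index, command and valid bits for each of the 32 lines. A small sketch of how those packed fields decode, matching the masks defined above (the struct and function names are invented for illustration):

    /* Unpack the per-line fields that bnx2_dump_ftq() prints. */
    struct tbdc_line {
            unsigned int bd_idx;  /* BNX2_TBDC_BDIDX_BDIDX:           bits 0-15  */
            unsigned int cmd;     /* BNX2_TBDC_BDIDX_CMD:             bits 24-31 */
            unsigned int valids;  /* BNX2_TBDC_CAM_OPCODE_CAM_VALIDS: bits 8-15  */
    };

    static struct tbdc_line tbdc_decode(unsigned int bidx, unsigned int cam_opcode)
    {
            struct tbdc_line line = {
                    .bd_idx = bidx & 0xffff,
                    .cmd    = (bidx >> 24) & 0xff,
                    .valids = (cam_opcode >> 8) & 0xff,
            };
            return line;
    }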
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e30e2a2..52f33b8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.72.50-0"
-#define DRV_MODULE_RELDATE "2012/04/23"
+#define DRV_MODULE_VERSION "1.72.51-0"
+#define DRV_MODULE_RELDATE "2012/06/18"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -51,6 +51,7 @@
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_sp.h"
@@ -248,13 +249,12 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID 48
-enum {
+#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+ (bp)->max_cos)
/* iSCSI L2 */
- BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
- BNX2X_FCOE_ETH_CID,
-};
+#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
/** Additional rings budgeting */
#ifdef BCM_CNIC
@@ -276,29 +276,30 @@ enum {
#define FIRST_TX_ONLY_COS_INDEX 1
#define FIRST_TX_COS_INDEX 0
-/* defines for decodeing the fastpath index and the cos index out of the
- * transmission queue index
- */
-#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
-
-#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
-#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
-
/* rules for calculating the cids of tx-only connections */
-#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
-#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
+#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
+#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
+ (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
/* fp index inside class of service range */
-#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
-
-/*
- * 0..15 eth cos0
- * 16..31 eth cos1 if applicable
- * 32..47 eth cos2 If applicable
- * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+#define FP_COS_TO_TXQ(fp, cos, bp) \
+ ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
+
+/* Indexes for transmission queues array:
+ * txdata for RSS i CoS j is at location i + (j * num of RSS)
+ * txdata for FCoE (if exist) is at location max cos * num of RSS
+ * txdata for FWD (if exist) is one location after FCoE
+ * txdata for OOO (if exist) is one location after FWD
*/
-#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
-#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
+enum {
+ FCOE_TXQ_IDX_OFFSET,
+ FWD_TXQ_IDX_OFFSET,
+ OOO_TXQ_IDX_OFFSET,
+};
+#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
+#ifdef BCM_CNIC
+#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
+#endif
/* fast path */
/*
@@ -453,6 +454,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
+ bool l4_rxhash;
u16 gro_size;
u16 full_page;
};
@@ -481,6 +483,8 @@ struct bnx2x_fp_txdata {
__le16 *tx_cons_sb;
int txq_index;
+ struct bnx2x_fastpath *parent_fp;
+ int tx_ring_size;
};
enum bnx2x_tpa_mode_t {
@@ -507,7 +511,7 @@ struct bnx2x_fastpath {
enum bnx2x_tpa_mode_t mode;
u8 max_cos; /* actual number of active tx coses */
- struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
+ struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS];
struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
@@ -547,51 +551,45 @@ struct bnx2x_fastpath {
rx_calls;
/* TPA related */
- struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+ struct bnx2x_agg_info *tpa_info;
u8 disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
u64 tpa_queue_used;
#endif
-
- struct tstorm_per_queue_stats old_tclient;
- struct ustorm_per_queue_stats old_uclient;
- struct xstorm_per_queue_stats old_xclient;
- struct bnx2x_eth_q_stats eth_q_stats;
- struct bnx2x_eth_q_stats_old eth_q_stats_old;
-
/* The size is calculated using the following:
sizeof name field from netdev structure +
4 ('-Xx-' string) +
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
-
- /* MACs object */
- struct bnx2x_vlan_mac_obj mac_obj;
-
- /* Queue State object */
- struct bnx2x_queue_sp_obj q_obj;
-
};
-#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
+#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index])
+#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
+#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
-/* FCoE L2 `fastpath' entry is right after the eth entries */
-#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
-#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
-#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
-#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
- txdata[FIRST_TX_COS_INDEX].var)
+#define FCOE_IDX_OFFSET 0
+
+#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
+ FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)])
+#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var)
+#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
+ txdata_ptr[FIRST_TX_COS_INDEX] \
+ ->var)
#define IS_ETH_FP(fp) (fp->index < \
BNX2X_NUM_ETH_QUEUES(fp->bp))
#ifdef BCM_CNIC
-#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
-#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
+#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
#else
#define IS_FCOE_FP(fp) false
#define IS_FCOE_IDX(idx) false
@@ -747,21 +745,6 @@ struct bnx2x_fastpath {
#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
-#define BNX2X_IP_CSUM_ERR(cqe) \
- (!((cqe)->fast_path_cqe.status_flags & \
- ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
- ((cqe)->fast_path_cqe.type_error_flags & \
- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
- (!((cqe)->fast_path_cqe.status_flags & \
- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
- ((cqe)->fast_path_cqe.type_error_flags & \
- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
- (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
(((le16_to_cpu(flags) & \
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
@@ -993,8 +976,8 @@ union cdu_context {
};
/* CDU host DB constants */
-#define CDU_ILT_PAGE_SZ_HW 3
-#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
+#define CDU_ILT_PAGE_SZ_HW 2
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
#ifdef BCM_CNIC
@@ -1197,11 +1180,31 @@ struct bnx2x_prev_path_list {
struct list_head list;
};
+struct bnx2x_sp_objs {
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* Queue State object */
+ struct bnx2x_queue_sp_obj q_obj;
+};
+
+struct bnx2x_fp_stats {
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
+};
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
*/
struct bnx2x_fastpath *fp;
+ struct bnx2x_sp_objs *sp_objs;
+ struct bnx2x_fp_stats *fp_stats;
+ struct bnx2x_fp_txdata *bnx2x_txq;
+ int bnx2x_txq_size;
void __iomem *regview;
void __iomem *doorbells;
u16 db_size;
@@ -1316,7 +1319,9 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
+#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1392,6 +1397,7 @@ struct bnx2x {
#define BNX2X_MAX_COS 3
#define BNX2X_MAX_TX_COS 2
int num_queues;
+ int num_napi_queues;
int disable_tpa;
u32 rx_mode;
@@ -1404,6 +1410,7 @@ struct bnx2x {
u8 igu_dsb_id;
u8 igu_base_sb;
u8 igu_sb_cnt;
+
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
@@ -1435,7 +1442,11 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- struct hw_context context;
+ /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ * context size we need 8 ILT entries.
+ */
+#define ILT_MAX_L2_LINES 8
+ struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
#define BP_ILT(bp) ((bp)->ilt)
@@ -1448,13 +1459,14 @@ struct bnx2x {
/*
* Maximum CID count that might be required by the bnx2x:
- * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
-#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
- NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
-#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
int qm_cid_count;
@@ -1613,6 +1625,8 @@ struct bnx2x {
extern int num_queues;
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
+ NON_ETH_CONTEXT_USE)
#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1671,6 +1685,9 @@ struct bnx2x_func_init_params {
continue; \
else
+#define for_each_napi_rx_queue(bp, var) \
+ for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
+
/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -1724,15 +1741,6 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
/**
- * Deletes all MACs configured for the specific MAC object.
- *
- * @param bp Function driver instance
- * @param mac_obj MAC object to cleanup
- *
- * @return zero if all MACs were cleaned
- */
-
-/**
* bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
*
* @bp: driver handle
@@ -1832,6 +1840,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_NORMAL 0
#define LOAD_OPEN 1
#define LOAD_DIAG 2
+#define LOAD_LOOPBACK_EXT 3
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
#define UNLOAD_RECOVERY 2
@@ -1914,13 +1923,17 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PCICFG_LINK_SPEED 0xf0000
#define PCICFG_LINK_SPEED_SHIFT 16
-
-#define BNX2X_NUM_TESTS 7
+#define BNX2X_NUM_TESTS_SF 7
+#define BNX2X_NUM_TESTS_MF 3
+#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
+ BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
+#define BNX2X_EXT_LOOPBACK 2
#define BNX2X_PHY_LOOPBACK_FAILED 1
#define BNX2X_MAC_LOOPBACK_FAILED 2
+#define BNX2X_EXT_LOOPBACK_FAILED 3
#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
BNX2X_PHY_LOOPBACK_FAILED)
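Several of the macro changes above (BNX2X_CNIC_START_ETH_CID, CID_COS_TO_TX_ONLY_CID, FP_COS_TO_TXQ, FCOE_TXQ_IDX) follow from the flat txdata array described in the new comment block: the queue for RSS index i at class of service j sits at i + j * (number of non-CNIC queues), and the FCoE/FWD/OOO entries are appended after all ETH slots. A minimal indexing sketch, with plain ints standing in for BNX2X_NUM_NON_CNIC_QUEUES(bp) and bp->max_cos:

    /* Flat txdata indexing sketch; num_rss and max_cos are stand-ins
     * for the driver's per-bp values.
     */
    static inline int eth_txq_idx(int rss_idx, int cos, int num_rss)
    {
            return rss_idx + cos * num_rss;   /* RSS queue i, CoS j */
    }

    static inline int fcoe_txq_idx(int num_rss, int max_cos)
    {
            return num_rss * max_cos;         /* first slot after all ETH entries */
    }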
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ad0743b..5aeb034 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -40,12 +40,19 @@
* Makes sure the contents of the bp->fp[to].napi is kept
* intact. This is done by first copying the napi struct from
* the target to the source, and then mem copying the entire
- * source onto the target
+ * source onto the target. Update txdata pointers and related
+ * content.
*/
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
struct bnx2x_fastpath *from_fp = &bp->fp[from];
struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
+ struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
+ struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
+ struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
+ int old_max_eth_txqs, new_max_eth_txqs;
+ int old_txdata_index = 0, new_txdata_index = 0;
/* Copy the NAPI object as it has been already initialized */
from_fp->napi = to_fp->napi;
@@ -53,6 +60,30 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
/* Move bnx2x_fastpath contents */
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
+
+ /* move sp_objs contents as well, as their indices match fp ones */
+ memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
+
+ /* move fp_stats contents as well, as their indices match fp ones */
+ memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
+
+ /* Update txdata pointers in fp and move txdata content accordingly:
+ * Each fp consumes 'max_cos' txdata structures, so the index should be
+ * decremented by max_cos x delta.
+ */
+
+ old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
+ new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
+ (bp)->max_cos;
+ if (from == FCOE_IDX(bp)) {
+ old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ }
+
+ memcpy(&bp->bnx2x_txq[old_txdata_index],
+ &bp->bnx2x_txq[new_txdata_index],
+ sizeof(struct bnx2x_fp_txdata));
+ to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -190,7 +221,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
if ((netif_tx_queue_stopped(txq)) &&
(bp->state == BNX2X_STATE_OPEN) &&
- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
@@ -264,12 +295,20 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
* CQE (calculated by HW).
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
- const struct eth_fast_path_rx_cqe *cqe)
+ const struct eth_fast_path_rx_cqe *cqe,
+ bool *l4_rxhash)
{
/* Set Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
+ enum eth_rss_hash_type htype;
+
+ htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
+ *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE);
return le32_to_cpu(cqe->rss_hash_result);
+ }
+ *l4_rxhash = false;
return 0;
}
@@ -323,7 +362,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page =
@@ -479,7 +518,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
where we are and drop the whole packet */
err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
if (unlikely(err)) {
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
}
@@ -558,6 +597,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
skb->rxhash = tpa_info->rxhash;
+ skb->l4_rxhash = tpa_info->l4_rxhash;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -584,7 +624,7 @@ drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate or map a new skb - dropping packet!\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
@@ -617,6 +657,27 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
return 0;
}
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+ struct bnx2x_fastpath *fp,
+ struct bnx2x_eth_q_stats *qstats)
+{
+ /* Do nothing if no IP/L4 csum validation was done */
+
+ if (cqe->fast_path_cqe.status_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+ return;
+
+ /* If both IP/L4 validation were done, check if an error was found. */
+
+ if (cqe->fast_path_cqe.type_error_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+ qstats->hw_csum_err++;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
@@ -660,6 +721,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
+ bool l4_rxhash;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -757,7 +819,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
+ bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
goto reuse_rx;
}
@@ -770,7 +832,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (skb == NULL) {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
goto reuse_rx;
}
memcpy(skb->data, data + pad, len);
@@ -784,14 +846,15 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
skb = build_skb(data, 0);
if (unlikely(!skb)) {
kfree(data);
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->
+ rx_skb_alloc_failed++;
goto next_rx;
}
skb_reserve(skb, pad);
} else {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
goto next_rx;
@@ -802,17 +865,14 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
+ skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
+ skb->l4_rxhash = l4_rxhash;
skb_checksum_none_assert(skb);
- if (bp->dev->features & NETIF_F_RXCSUM) {
-
- if (likely(BNX2X_RX_CSUM_OK(cqe)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- fp->eth_q_stats.hw_csum_err++;
- }
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ bnx2x_csum_validate(skb, cqe, fp,
+ bnx2x_fp_qstats(bp, fp));
skb_record_rx_queue(skb, fp->rx_queue);
@@ -873,7 +933,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
@@ -1190,7 +1250,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
unsigned pkts_compl = 0, bytes_compl = 0;
u16 sw_prod = txdata->tx_pkt_prod;
@@ -1202,7 +1262,8 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
sw_cons++;
}
netdev_tx_reset_queue(
- netdev_get_tx_queue(bp->dev, txdata->txq_index));
+ netdev_get_tx_queue(bp->dev,
+ txdata->txq_index));
}
}
}
@@ -1310,7 +1371,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
free_irq(bp->dev->irq, bp->dev);
}
-int __devinit bnx2x_enable_msix(struct bnx2x *bp)
+int bnx2x_enable_msix(struct bnx2x *bp)
{
int msix_vec = 0, i, rc, req_cnt;
@@ -1564,6 +1625,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
#endif
/* Add special queues */
bp->num_queues += NON_ETH_CONTEXT_USE;
+
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
/**
@@ -1592,8 +1655,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
int rc, tx, rx;
- tx = MAX_TXQS_PER_COS * bp->max_cos;
- rx = BNX2X_NUM_ETH_QUEUES(bp);
+ tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
+ rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
/* account for fcoe queue */
#ifdef BCM_CNIC
@@ -1651,14 +1714,13 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
static int bnx2x_init_rss_pf(struct bnx2x *bp)
{
int i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
/* Prepare the initial contents fo the indirection table if RSS is
* enabled
*/
- for (i = 0; i < sizeof(ind_table); i++)
- ind_table[i] =
+ for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
+ bp->rss_conf_obj.ind_table[i] =
bp->fp->cl_id +
ethtool_rxfh_indir_default(i, num_eth_queues);
@@ -1670,12 +1732,11 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
* For 57712 and newer on the other hand it's a per-function
* configuration.
*/
- return bnx2x_config_rss_eth(bp, ind_table,
- bp->port.pmf || !CHIP_IS_E1x(bp));
+ return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash)
+ bool config_hash)
{
struct bnx2x_config_rss_params params = {NULL};
int i;
@@ -1698,11 +1759,15 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
- memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+ memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
if (config_hash) {
/* RSS keys */
@@ -1739,7 +1804,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
struct bnx2x_mcast_ramrod_params rparam = {NULL};
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
/***************** Cleanup MACs' object first *************************/
@@ -1750,7 +1815,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
/* Clean ETH primary MAC */
__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
- rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+ rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc != 0)
BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
@@ -1836,11 +1901,16 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
+
+ int cos;
struct napi_struct orig_napi = fp->napi;
+ struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
/* bzero bnx2x_fastpath contents */
- if (bp->stats_init)
+ if (bp->stats_init) {
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
- else {
+ } else {
/* Keep Queue statistics */
struct bnx2x_eth_q_stats *tmp_eth_q_stats;
struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
@@ -1848,26 +1918,27 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
GFP_KERNEL);
if (tmp_eth_q_stats)
- memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+ memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
sizeof(struct bnx2x_eth_q_stats));
tmp_eth_q_stats_old =
kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
GFP_KERNEL);
if (tmp_eth_q_stats_old)
- memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+ memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
if (tmp_eth_q_stats) {
- memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
+ memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
+ sizeof(struct bnx2x_eth_q_stats));
kfree(tmp_eth_q_stats);
}
if (tmp_eth_q_stats_old) {
- memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+ memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
kfree(tmp_eth_q_stats_old);
}
@@ -1876,7 +1947,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
-
+ fp->tpa_info = orig_tpa_info;
fp->bp = bp;
fp->index = index;
if (IS_ETH_FP(fp))
@@ -1885,6 +1956,16 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Special queues support only one CoS */
fp->max_cos = 1;
+ /* Init txdata pointers */
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
+#endif
+ if (IS_ETH_FP(fp))
+ for_each_cos_in_tx_queue(fp, cos)
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
+ BNX2X_NUM_ETH_QUEUES(bp) + index];
+
/*
* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
@@ -1934,11 +2015,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/*
* Zero fastpath structures preserving invariants like napi, which are
* allocated only once, fp index, max_cos, bp pointer.
- * Also set fp->disable_tpa.
+ * Also set fp->disable_tpa and txdata_ptr.
*/
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
+ memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
+ sizeof(struct bnx2x_fp_txdata));
/* Set the receive queues buffer size */
@@ -2161,6 +2244,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
case LOAD_DIAG:
+ case LOAD_LOOPBACK_EXT:
bp->state = BNX2X_STATE_DIAG;
break;
@@ -2180,6 +2264,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
bnx2x_setup_cnic_irq_info(bp);
+ bnx2x_setup_cnic_info(bp);
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
@@ -2200,7 +2285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
return -EBUSY;
}
- bnx2x_dcbx_init(bp);
+ /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
+ if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
+ bnx2x_dcbx_init(bp, false);
+
return 0;
#ifndef BNX2X_STOP_ON_ERROR
@@ -2283,6 +2371,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Stop Tx */
bnx2x_tx_disable(bp);
+ netdev_reset_tc(bp->dev);
#ifdef BCM_CNIC
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
@@ -2441,8 +2530,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
#endif
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
- bnx2x_tx_int(bp, &fp->txdata[cos]);
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+ bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
if (bnx2x_has_rx_work(fp)) {
@@ -2501,8 +2590,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* we split the first BD into headers and data BDs
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
*/
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata,
@@ -2821,7 +2908,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_fastpath *fp;
struct netdev_queue *txq;
struct bnx2x_fp_txdata *txdata;
struct sw_tx_bd *tx_buf;
@@ -2831,7 +2917,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
- int nbd, txq_index, fp_index, txdata_index;
+ int nbd, txq_index;
dma_addr_t mapping;
u32 xmit_type = bnx2x_xmit_type(bp, skb);
int i;
@@ -2850,31 +2936,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
- /* decode the fastpath index and the cos index from the txq */
- fp_index = TXQ_TO_FP(txq_index);
- txdata_index = TXQ_TO_COS(txq_index);
-
-#ifdef BCM_CNIC
- /*
- * Override the above for the FCoE queue:
- * - FCoE fp entry is right after the ETH entries.
- * - FCoE L2 queue uses bp->txdata[0] only.
- */
- if (unlikely(!NO_FCOE(bp) && (txq_index ==
- bnx2x_fcoe_tx(bp, txq_index)))) {
- fp_index = FCOE_IDX;
- txdata_index = 0;
- }
-#endif
+ txdata = &bp->bnx2x_txq[txq_index];
/* enable this debug print to view the transmission queue being used
DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
- /* locate the fastpath and the txdata */
- fp = &bp->fp[fp_index];
- txdata = &fp->txdata[txdata_index];
-
/* enable this debug print to view the tranmission details
DP(NETIF_MSG_TX_QUEUED,
"transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
@@ -2882,7 +2949,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(bnx2x_tx_avail(bp, txdata) <
(skb_shinfo(skb)->nr_frags + 3))) {
- fp->eth_q_stats.driver_xoff++;
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
@@ -3156,7 +3223,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txdata->tx_bd_prod += nbd;
- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
netif_tx_stop_queue(txq);
/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3164,8 +3231,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
* fp->bd_tx_cons */
smp_mb();
- fp->eth_q_stats.driver_xoff++;
- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
netif_tx_wake_queue(txq);
}
txdata->tx_pkt++;
@@ -3230,7 +3297,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* configure traffic class to transmission queue mapping */
for (cos = 0; cos < bp->max_cos; cos++) {
count = BNX2X_NUM_ETH_QUEUES(bp);
- offset = cos * MAX_TXQS_PER_COS;
+ offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
netdev_set_tc_queue(dev, cos, count, offset);
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"mapping tc %d to offset %d count %d\n",
@@ -3329,7 +3396,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
if (!skip_tx_queue(bp, fp_index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFDOWN,
"freeing tx memory of fp %d cos %d cid %d\n",
@@ -3401,7 +3468,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
- fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
return i - failure_cnt;
}
@@ -3486,7 +3553,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
if (!skip_tx_queue(bp, index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFUP,
"allocating tx memory of fp %d cos %d\n",
@@ -3569,7 +3636,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp))
/* FCoE */
- if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
/* we will fail load process instead of mark
* NO_FCOE_FLAG
*/
@@ -3594,7 +3661,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
*/
/* move FCoE fp even NO_FCOE_FLAG is on */
- bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+ bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
#endif
bp->num_queues -= delta;
BNX2X_ERR("Adjusted num of queues from %d to %d\n",
@@ -3606,7 +3673,11 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
+ kfree(bp->fp->tpa_info);
kfree(bp->fp);
+ kfree(bp->sp_objs);
+ kfree(bp->fp_stats);
+ kfree(bp->bnx2x_txq);
kfree(bp->msix_table);
kfree(bp->ilt);
}
@@ -3617,6 +3688,8 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
struct msix_entry *tbl;
struct bnx2x_ilt *ilt;
int msix_table_size = 0;
+ int fp_array_size;
+ int i;
/*
* The biggest MSI-X table we might need is as a maximum number of fast
@@ -3625,12 +3698,44 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
- sizeof(*fp), GFP_KERNEL);
+ fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+ BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+
+ fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
+ for (i = 0; i < fp_array_size; i++) {
+ fp[i].tpa_info =
+ kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
+ sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+ if (!(fp[i].tpa_info))
+ goto alloc_err;
+ }
+
bp->fp = fp;
+ /* allocate sp objs */
+ bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+ GFP_KERNEL);
+ if (!bp->sp_objs)
+ goto alloc_err;
+
+ /* allocate fp_stats */
+ bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+ GFP_KERNEL);
+ if (!bp->fp_stats)
+ goto alloc_err;
+
+ /* Allocate memory for the transmission queues array */
+ bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
+#ifdef BCM_CNIC
+ bp->bnx2x_txq_size++;
+#endif
+ bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
+ sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+ if (!bp->bnx2x_txq)
+ goto alloc_err;
+
/* msix table */
tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7cd99b7..dfa757e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -29,6 +29,7 @@
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
extern int num_queues;
+extern int int_mode;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -89,12 +90,12 @@ void bnx2x_send_unload_done(struct bnx2x *bp);
* bnx2x_config_rss_pf - configure RSS parameters in a PF.
*
* @bp: driver handle
- * @rss_obj RSS object to use
+ * @rss_obj: RSS object to use
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
*/
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash);
+ bool config_hash);
/**
* bnx2x__init_func_obj - init function object
@@ -244,6 +245,14 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
* @bp: driver handle
*/
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_cnic_info - provides cnic with updated info
+ *
+ * @bp: driver handle
+ */
+void bnx2x_setup_cnic_info(struct bnx2x *bp);
+
#endif
/**
@@ -409,7 +418,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp);
*
* @bp: driver handle
*/
-void bnx2x_dcbx_init(struct bnx2x *bp);
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
/**
* bnx2x_set_power_state - set power state to the requested value.
@@ -487,7 +496,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
* fills msix_table, requests vectors, updates num_queues
* according to number of available vectors.
*/
-int __devinit bnx2x_enable_msix(struct bnx2x *bp);
+int bnx2x_enable_msix(struct bnx2x *bp);
/**
* bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -728,7 +737,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
u8 cos;
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
return true;
return false;
}
@@ -780,8 +789,10 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
int i;
+ bp->num_napi_queues = bp->num_queues;
+
/* Add NAPI objects */
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -790,10 +801,12 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
+void bnx2x_set_int_mode(struct bnx2x *bp);
+
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG) {
@@ -809,7 +822,8 @@ static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
return num_queues ?
min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
+ min_t(int, netif_get_num_default_rss_queues(),
+ BNX2X_MAX_QUEUES(bp));
}
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
@@ -865,11 +879,9 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
return 2 * vn + BP_PORT(bp);
}
-static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
- bool config_hash)
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
- config_hash);
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}
/**
@@ -975,8 +987,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
struct bnx2x *bp = fp->bp;
/* Configure classification DBs */
- bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
- BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+ bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
+ fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
bnx2x_sp_mapping(bp, mac_rdata),
BNX2X_FILTER_MAC_PENDING,
&bp->sp_state, obj_type,
@@ -1068,12 +1080,14 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
}
static inline void bnx2x_init_txdata(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
- __le16 *tx_cons_sb)
+ struct bnx2x_fp_txdata *txdata, u32 cid,
+ int txq_index, __le16 *tx_cons_sb,
+ struct bnx2x_fastpath *fp)
{
txdata->cid = cid;
txdata->txq_index = txq_index;
txdata->tx_cons_sb = tx_cons_sb;
+ txdata->parent_fp = fp;
DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
txdata->cid, txdata->txq_index);
@@ -1107,18 +1121,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
- /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
- * 16 ETH clients per function when CNIC is enabled!
- *
- * Fix it ASAP!!!
- */
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
-
- bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
@@ -1135,8 +1144,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
/* No multi-CoS for FCoE L2 client */
BUG_ON(fp->max_cos != 1);
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
DP(NETIF_MSG_IFUP,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 4f9244b..8a73374 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -972,23 +972,26 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
bp->dcbx_config_params.admin_default_priority = 0;
}
-void bnx2x_dcbx_init(struct bnx2x *bp)
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
{
u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+ /* only PMF can send ADMIN msg to MFW in old MFW versions */
+ if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
+ return;
+
if (bp->dcbx_enabled <= 0)
return;
/* validate:
	 * chip is good for dcbx version,
* dcb is wanted
- * the function is pmf
* shmem2 contains DCBX support fields
*/
DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
bp->dcb_state, bp->port.pmf);
- if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
dcbx_lldp_params_offset =
SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -999,12 +1002,23 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
- bnx2x_dcbx_admin_mib_updated_params(bp,
- dcbx_lldp_params_offset);
+ /* need HW lock to avoid scenario of two drivers
+ * writing in parallel to shmem
+ */
+ bnx2x_acquire_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+ if (update_shmem)
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
/* Let HW start negotiation */
bnx2x_fw_command(bp,
DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ /* release HW lock only after MFW acks that it finished
+ * reading values from shmem
+ */
+ bnx2x_release_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
}
}
}
@@ -2063,10 +2077,8 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
"Handling parity error recovery. Try again later\n");
return 1;
}
- if (netif_running(bp->dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
+ if (netif_running(bp->dev))
+ bnx2x_dcbx_init(bp, true);
DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
if (rc)
return 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ddc18ee..bff3129 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -177,6 +177,8 @@ static const struct {
4, STATS_FLAGS_FUNC, "recoverable_errors" },
{ STATS_OFFSET32(unrecoverable_error),
4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
+ { STATS_OFFSET32(eee_tx_lpi),
+ 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -185,7 +187,8 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
int port_type;
u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFP_FIBER:
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
case ETH_PHY_XFP_FIBER:
case ETH_PHY_KR:
case ETH_PHY_CX4:
@@ -218,6 +221,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(bp->port.supported[cfg_idx ^ 1] &
(SUPPORTED_TP | SUPPORTED_FIBRE));
cmd->advertising = bp->port.advertising[cfg_idx];
+ if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
+ ETH_PHY_SFP_1G_FIBER) {
+ cmd->supported &= ~(SUPPORTED_10000baseT_Full);
+ cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+ }
if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
if (!(bp->flags & MF_FUNC_DIS)) {
@@ -293,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
- u32 speed;
+ u32 speed, phy_idx;
if (IS_MF_SD(bp))
return 0;
@@ -548,9 +556,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
"10G half not supported\n");
return -EINVAL;
}
-
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
if (!(bp->port.supported[cfg_idx]
- & SUPPORTED_10000baseT_Full)) {
+ & SUPPORTED_10000baseT_Full) ||
+ (bp->link_params.phy[phy_idx].media_type ==
+ ETH_PHY_SFP_1G_FIBER)) {
DP(BNX2X_MSG_ETHTOOL,
"10G full not supported\n");
return -EINVAL;
@@ -824,7 +834,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
- info->testinfo_len = BNX2X_NUM_TESTS;
+ info->testinfo_len = BNX2X_NUM_TESTS(bp);
info->eedump_len = bp->common.flash_size;
info->regdump_len = bnx2x_get_regs_len(dev);
}
@@ -1150,6 +1160,65 @@ static int bnx2x_get_eeprom(struct net_device *dev,
return rc;
}
+static int bnx2x_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0, phy_idx;
+ u8 *user_data = data;
+ int remaining_len = ee->len, xfer_size;
+ unsigned int page_off = ee->offset;
+
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ bnx2x_acquire_phy_lock(bp);
+ while (!rc && remaining_len > 0) {
+ xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : remaining_len;
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ page_off,
+ xfer_size,
+ user_data);
+ remaining_len -= xfer_size;
+ user_data += xfer_size;
+ page_off += xfer_size;
+ }
+
+ bnx2x_release_phy_lock(bp);
+ return rc;
+}
+
+static int bnx2x_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int phy_idx;
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ switch (bp->link_params.phy[phy_idx].media_type) {
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
+ case ETH_PHY_DA_TWINAX:
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
u32 cmd_flags)
{
@@ -1531,18 +1600,146 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
-static const struct {
- char string[ETH_GSTRING_LEN];
-} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
- { "register_test (offline)" },
- { "memory_test (offline)" },
- { "loopback_test (offline)" },
- { "nvram_test (online)" },
- { "interrupt_test (online)" },
- { "link_test (online)" },
- { "idle check (online)" }
+static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+ "register_test (offline) ",
+ "memory_test (offline) ",
+ "int_loopback_test (offline)",
+ "ext_loopback_test (offline)",
+ "nvram_test (online) ",
+ "interrupt_test (online) ",
+ "link_test (online) "
};
+static u32 bnx2x_eee_to_adv(u32 eee_adv)
+{
+ u32 modes = 0;
+
+ if (eee_adv & SHMEM_EEE_100M_ADV)
+ modes |= ADVERTISED_100baseT_Full;
+ if (eee_adv & SHMEM_EEE_1G_ADV)
+ modes |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & SHMEM_EEE_10G_ADV)
+ modes |= ADVERTISED_10000baseT_Full;
+
+ return modes;
+}
+
+static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+{
+ u32 eee_adv = 0;
+ if (modes & ADVERTISED_100baseT_Full)
+ eee_adv |= SHMEM_EEE_100M_ADV;
+ if (modes & ADVERTISED_1000baseT_Full)
+ eee_adv |= SHMEM_EEE_1G_ADV;
+ if (modes & ADVERTISED_10000baseT_Full)
+ eee_adv |= SHMEM_EEE_10G_ADV;
+
+ return eee_adv << shift;
+}
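
The two helpers above are mutual inverses over the EEE-capable speeds: bnx2x_adv_to_eee() packs ethtool ADVERTISED_* bits into the SHMEM_EEE_*_ADV field (shifted into place) and bnx2x_eee_to_adv() unpacks them again, silently dropping speeds that have no EEE equivalent. A minimal sketch of that property, not part of the patch (the helper name is made up; it assumes the SHMEM_EEE_* definitions from bnx2x_hsi.h and ADVERTISED_* from <linux/ethtool.h>):

static bool bnx2x_eee_adv_roundtrip_ok(u32 adv)
{
	u32 eee = bnx2x_adv_to_eee(adv, SHMEM_EEE_ADV_STATUS_SHIFT);

	/* Converting back must yield exactly the EEE-capable subset of adv */
	return bnx2x_eee_to_adv(eee >> SHMEM_EEE_ADV_STATUS_SHIFT) ==
	       (adv & (ADVERTISED_100baseT_Full |
		       ADVERTISED_1000baseT_Full |
		       ADVERTISED_10000baseT_Full));
}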
+
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ edata->supported =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ edata->advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ edata->lp_advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+ /* SHMEM value is in 16u units --> Convert to 1u units. */
+ edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
+
+ edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
+ edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
+ edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
+
+ return 0;
+}
+
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+ u32 advertised;
+
+ if (IS_MF(bp))
+ return 0;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
+ DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
+ return -EOPNOTSUPP;
+ }
+
+ advertised = bnx2x_adv_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
+ DP(BNX2X_MSG_ETHTOOL,
+		   "Direct manipulation of EEE advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Maximal Tx Lpi timer supported is %x(u)\n",
+ EEE_MODE_TIMER_MASK);
+ return -EINVAL;
+ }
+ if (edata->tx_lpi_enabled &&
+ (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Minimal Tx Lpi timer supported is %d(u)\n",
+ EEE_MODE_NVRAM_AGGRESSIVE_TIME);
+ return -EINVAL;
+ }
+
+	/* All is well; apply changes */
+ if (edata->eee_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
+
+ if (edata->tx_lpi_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
+
+ bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
+ bp->link_params.eee_mode |= (edata->tx_lpi_timer &
+ EEE_MODE_TIMER_MASK) |
+ EEE_MODE_OVERRIDE_NVRAM |
+ EEE_MODE_OUTPUT_TIME;
+
+	/* Restart link to propagate changes */
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
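
Note that bnx2x_set_eee() always sets EEE_MODE_OVERRIDE_NVRAM and EEE_MODE_OUTPUT_TIME together with the requested timer, so the link code takes the "time value in eee_mode, used directly" branch of bnx2x_eee_calc_timer() (see the bnx2x_link.c hunk below) rather than translating an NVRAM power mode.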
+
+
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
@@ -1811,6 +2008,14 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
+
+ cnt = 1400;
+ while (!bp->link_vars.link_up && cnt--)
+ msleep(20);
+
+ if (cnt <= 0 && !bp->link_vars.link_up)
+ DP(BNX2X_MSG_ETHTOOL,
+ "Timeout waiting for link init\n");
}
}
@@ -1821,7 +2026,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
unsigned char *packet;
struct bnx2x_fastpath *fp_rx = &bp->fp[0];
struct bnx2x_fastpath *fp_tx = &bp->fp[0];
- struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
+ struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
u16 tx_start_idx, tx_idx;
u16 rx_start_idx, rx_idx;
u16 pkt_prod, bd_prod;
@@ -1836,13 +2041,16 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
u16 len;
int rc = -ENODEV;
u8 *data;
- struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
+ txdata->txq_index);
/* check the loopback mode */
switch (loopback_mode) {
case BNX2X_PHY_LOOPBACK:
- if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
+ DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
return -EINVAL;
+ }
break;
case BNX2X_MAC_LOOPBACK:
if (CHIP_IS_E3(bp)) {
@@ -1859,6 +2067,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
+ case BNX2X_EXT_LOOPBACK:
+ if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't configure external loopback\n");
+ return -EINVAL;
+ }
+ break;
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
@@ -2030,6 +2245,38 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
return rc;
}
+static int bnx2x_test_ext_loopback(struct bnx2x *bp)
+{
+ int rc;
+ u8 is_serdes =
+ (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+ if (BP_NOMCP(bp))
+ return -ENODEV;
+
+ if (!netif_running(bp->dev))
+ return BNX2X_EXT_LOOPBACK_FAILED;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for external lb) failed\n");
+ return -ENODEV;
+ }
+ bnx2x_wait_for_link(bp, 1, is_serdes);
+
+ bnx2x_netif_stop(bp, 1);
+
+ rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
+
+ bnx2x_netif_start(bp);
+
+ return rc;
+}
+
#define CRC32_RESIDUAL 0xdebb20e3
static int bnx2x_test_nvram(struct bnx2x *bp)
@@ -2112,7 +2359,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
return -ENODEV;
}
- params.q_obj = &bp->fp->q_obj;
+ params.q_obj = &bp->sp_objs->q_obj;
params.cmd = BNX2X_Q_CMD_EMPTY;
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
@@ -2125,24 +2372,31 @@ static void bnx2x_self_test(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
u8 is_serdes;
+ int rc;
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
"Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ DP(BNX2X_MSG_ETHTOOL,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
- memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+ memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test when interface is down\n");
return;
+ }
- /* offline tests are not supported in MF mode */
- if (IS_MF(bp))
- etest->flags &= ~ETH_TEST_FL_OFFLINE;
is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
- if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ /* offline tests are not supported in MF mode */
+ if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
int port = BP_PORT(bp);
u32 val;
u8 link_up;
@@ -2155,7 +2409,14 @@ static void bnx2x_self_test(struct net_device *dev,
link_up = bp->link_vars.link_up;
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- bnx2x_nic_load(bp, LOAD_DIAG);
+ rc = bnx2x_nic_load(bp, LOAD_DIAG);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for offline) failed\n");
+ return;
+ }
+
/* wait until link state is restored */
bnx2x_wait_for_link(bp, 1, is_serdes);
@@ -2168,30 +2429,51 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- buf[2] = bnx2x_test_loopback(bp);
+ buf[2] = bnx2x_test_loopback(bp); /* internal LB */
if (buf[2] != 0)
etest->flags |= ETH_TEST_FL_FAILED;
+ if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
+ if (buf[3] != 0)
+ etest->flags |= ETH_TEST_FL_FAILED;
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
/* restore input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
-
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for online) failed\n");
+ return;
+ }
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
if (bnx2x_test_nvram(bp) != 0) {
- buf[3] = 1;
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_test_intr(bp) != 0) {
- buf[4] = 1;
+ if (!IS_MF(bp))
+ buf[5] = 1;
+ else
+ buf[1] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
+ if (!IS_MF(bp))
+ buf[6] = 1;
+ else
+ buf[2] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
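
With this layout the result slots line up with the strings emitted by bnx2x_get_strings(): in single-function mode buf[0..3] hold the offline results (register, memory, internal and external loopback) and buf[4..6] the nvram, interrupt and link results, while in MF mode only the three online tests run and land in buf[0..2], matching the string array read from offset 4.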
@@ -2236,7 +2518,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
return num_stats;
case ETH_SS_TEST:
- return BNX2X_NUM_TESTS;
+ return BNX2X_NUM_TESTS(bp);
default:
return -EINVAL;
@@ -2246,7 +2528,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k;
+ int i, j, k, offset, start;
char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
@@ -2277,7 +2559,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
break;
case ETH_SS_TEST:
- memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+ /* First 4 tests cannot be done in MF mode */
+ if (!IS_MF(bp))
+ start = 0;
+ else
+ start = 4;
+ for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
+ i++, j++) {
+ offset = sprintf(buf+32*i, "%s",
+ bnx2x_tests_str_arr[j]);
+			*(buf + 32*i + offset) = '\0';
+ }
break;
}
}
@@ -2291,7 +2583,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+ hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {
/* skip this counter */
@@ -2375,6 +2667,41 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
+static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v4)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v6)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules __always_unused)
{
@@ -2384,7 +2711,102 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = BNX2X_NUM_ETH_QUEUES(bp);
return 0;
+ case ETHTOOL_GRXFH:
+ return bnx2x_get_rss_flags(bp, info);
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+ int udp_rss_requested;
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+		/* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ udp_rss_requested = 1;
+ else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
+ udp_rss_requested = 0;
+ else
+ return -EINVAL;
+ if ((info->flow_type == UDP_V4_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
+ DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+ udp_rss_requested ? "enabled" : "disabled");
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ } else if ((info->flow_type == UDP_V6_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
+			bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
+			DP(BNX2X_MSG_ETHTOOL,
+			   "rss re-configured, UDP 4-tuple %s\n",
+			   udp_rss_requested ? "enabled" : "disabled");
+			return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ } else {
+ return 0;
+ }
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+		/* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
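
The new ETHTOOL_SRXFH handler above is what a userspace ethtool_rxnfc request lands on. A rough userspace sketch of requesting UDP/IPv4 4-tuple hashing, purely illustrative and not part of the patch (the function name is invented; error handling is omitted):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* sock is any open AF_INET socket; returns the ioctl result (0 on success) */
static int request_udp4_4tuple_rss(int sock, const char *ifname)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_SRXFH,
		.flow_type = UDP_V4_FLOW,
		.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	return ioctl(sock, SIOCETHTOOL, &ifr);
}

The kernel routes this through the ethtool core to the .set_rxnfc op registered in bnx2x_ethtool_ops at the bottom of this file.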
+
+static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return bnx2x_set_rss_flags(bp, info);
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
@@ -2424,7 +2846,6 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
@@ -2436,10 +2857,88 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
* align the received table to the Client ID of the leading RSS
* queue
*/
- ind_table[i] = indir[i] + bp->fp->cl_id;
+ bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, ind_table, false);
+ return bnx2x_config_rss_eth(bp, false);
+}
+
+/**
+ * bnx2x_get_channels - gets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: returns the number of max / current queues
+ */
+static void bnx2x_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
+ channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+/**
+ * bnx2x_change_num_queues - change the number of RSS queues.
+ *
+ * @bp: bnx2x private structure
+ *
+ * Re-configure interrupt mode to get the new number of MSI-X
+ * vectors and re-add NAPI objects.
+ */
+static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
+{
+ bnx2x_del_all_napi(bp);
+ bnx2x_disable_msi(bp);
+ BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
+ bnx2x_set_int_mode(bp);
+ bnx2x_add_all_napi(bp);
+}
+
+/**
+ * bnx2x_set_channels - sets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: includes the number of queues requested
+ */
+static int bnx2x_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+ channels->rx_count, channels->tx_count, channels->other_count,
+ channels->combined_count);
+
+ /* We don't support separate rx / tx channels.
+ * We don't allow setting 'other' channels.
+ */
+ if (channels->rx_count || channels->tx_count || channels->other_count
+ || (channels->combined_count == 0) ||
+ (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
+ DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
+ return -EINVAL;
+ }
+
+ /* Check if there was a change in the active parameters */
+ if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
+ DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
+ return 0;
+ }
+
+ /* Set the requested number of queues in bp context.
+ * Note that the actual number of queues created during load may be
+ * less than requested if memory is low.
+ */
+ if (unlikely(!netif_running(dev))) {
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return 0;
+ }
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
}
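
Because the running-interface case goes through a full bnx2x_nic_unload()/bnx2x_nic_load() cycle, changing the combined channel count on a live port briefly drops traffic; when the interface is down only the bookkeeping in bnx2x_change_num_queues() is done and the new count takes effect on the next load.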
static const struct ethtool_ops bnx2x_ethtool_ops = {
@@ -2469,9 +2968,16 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+ .get_module_info = bnx2x_get_module_info,
+ .get_module_eeprom = bnx2x_get_module_eeprom,
+ .get_eee = bnx2x_get_eee,
+ .set_eee = bnx2x_set_eee,
};
void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 426f77a..bbc66ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -321,9 +321,7 @@
#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
-/**
- * This file defines HSI constants common to all microcode flows
- */
+/* This file defines HSI constants common to all microcode flows */
#define PROTOCOL_STATE_BIT_OFFSET 6
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index a440a8b..76b6e65 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -10,6 +10,7 @@
#define BNX2X_HSI_H
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
@@ -33,12 +34,6 @@ struct license_key {
u32 reserved_b[4];
};
-
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_MAX 2
-#define NVM_PATH_MAX 2
-
/****************************************************************************
* Shared HW configuration *
****************************************************************************/
@@ -1067,8 +1062,18 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
uses the same defines as link_config */
u32 mfw_wol_link_cfg2; /* 0x480 */
- u32 Reserved2[17]; /* 0x484 */
+ /* EEE power saving mode */
+ u32 eee_power_mode; /* 0x484 */
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
+
+
+ u32 Reserved2[16]; /* 0x488 */
};
@@ -1140,6 +1145,7 @@ struct drv_port_mb {
u32 link_status;
/* Driver should update this field on any link change event */
+ #define LINK_STATUS_NONE (0<<0)
#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
#define LINK_STATUS_LINK_UP 0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
@@ -1197,6 +1203,7 @@ struct drv_port_mb {
#define LINK_STATUS_PFC_ENABLED 0x20000000
#define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+ #define LINK_STATUS_SFP_TX_FAULT 0x80000000
u32 port_stx;
@@ -1240,9 +1247,11 @@ struct drv_func_mb {
#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
#define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+ #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
@@ -1255,6 +1264,8 @@ struct drv_func_mb {
#define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
#define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
+ #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1320,6 +1331,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
#define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
+ #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1383,6 +1396,8 @@ struct drv_func_mb {
#define DRV_STATUS_DRV_INFO_REQ 0x04000000
+ #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
+
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
#define VIRT_MAC_SIGNATURE 0x564d0000
@@ -1613,6 +1628,11 @@ struct fw_flr_mb {
struct fw_flr_ack ack;
};
+struct eee_remote_vals {
+ u32 tx_tw;
+ u32 rx_tw;
+};
+
/**** SUPPORT FOR SHMEM ARRAYS ***
 * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
 * define arrays with storage types smaller than unsigned dwords.
@@ -2053,6 +2073,41 @@ struct shmem2_region {
#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
u32 ibft_host_addr; /* initialized by option ROM */
+ struct eee_remote_vals eee_remote_vals[PORT_MAX];
+ u32 reserved[E2_FUNC_MAX];
+
+
+ /* the status of EEE auto-negotiation
+ * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 19:16 the supported modes for EEE.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:19 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+ * 30:29 are 2'b11.
+ * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
+ * value. When 1'b1 those bits contains a value times 16 microseconds.
+ */
+ u32 eee_status[PORT_MAX];
+ #define SHMEM_EEE_TIMER_MASK 0x0000ffff
+ #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
+ #define SHMEM_EEE_SUPPORTED_SHIFT 16
+ #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
+ #define SHMEM_EEE_100M_ADV (1<<0)
+ #define SHMEM_EEE_1G_ADV (1<<1)
+ #define SHMEM_EEE_10G_ADV (1<<2)
+ #define SHMEM_EEE_ADV_STATUS_SHIFT 20
+ #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
+ #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
+ #define SHMEM_EEE_REQUESTED_BIT 0x10000000
+ #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
+ #define SHMEM_EEE_ACTIVE_BIT 0x40000000
+ #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
+
+ u32 sizeof_port_stats;
};
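
For reference, the eee_status word packs the idle timer in bits 15:0 (in units of 16 microseconds when SHMEM_EEE_TIME_OUTPUT_BIT is set, which is why bnx2x_get_eee() shifts the field left by 4; for example a raw value of 0x20 reads back as 512 microseconds) with the capability and advertisement nibbles above it. A throwaway decoder, purely illustrative (the struct and function names are made up; it assumes the SHMEM_EEE_* masks just defined):

struct eee_status_decoded {
	u32 timer;		/* raw bits 15:0, see SHMEM_EEE_TIME_OUTPUT_BIT */
	u32 supported;		/* SHMEM_EEE_{100M,1G,10G}_ADV bits */
	u32 advertised;
	u32 lp_advertised;
	bool requested;
	bool lpi_requested;
	bool active;
	bool time_in_16us_units;
};

static void decode_eee_status(u32 eee_cfg, struct eee_status_decoded *out)
{
	out->timer         = eee_cfg & SHMEM_EEE_TIMER_MASK;
	out->supported     = (eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
			     SHMEM_EEE_SUPPORTED_SHIFT;
	out->advertised    = (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
			     SHMEM_EEE_ADV_STATUS_SHIFT;
	out->lp_advertised = (eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
			     SHMEM_EEE_LP_ADV_STATUS_SHIFT;
	out->requested     = !!(eee_cfg & SHMEM_EEE_REQUESTED_BIT);
	out->lpi_requested = !!(eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT);
	out->active        = !!(eee_cfg & SHMEM_EEE_ACTIVE_BIT);
	out->time_in_16us_units = !!(eee_cfg & SHMEM_EEE_TIME_OUTPUT_BIT);
}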
@@ -2599,6 +2654,9 @@ struct host_port_stats {
u32 pfc_frames_tx_lo;
u32 pfc_frames_rx_hi;
u32 pfc_frames_rx_lo;
+
+ u32 eee_lpi_count_hi;
+ u32 eee_lpi_count_lo;
};
@@ -2638,118 +2696,6 @@ struct host_func_stats {
/* VIC definitions */
#define VICSTATST_UIF_INDEX 2
-/* current drv_info version */
-#define DRV_INFO_CUR_VER 1
-
-/* drv_info op codes supported */
-enum drv_info_opcode {
- ETH_STATS_OPCODE,
- FCOE_STATS_OPCODE,
- ISCSI_STATS_OPCODE
-};
-
-#define ETH_STAT_INFO_VERSION_LEN 12
-/* Per PCI Function Ethernet Statistics required from the driver */
-struct eth_stats_info {
- /* Function's Driver Version. padded to 12 */
- u8 version[ETH_STAT_INFO_VERSION_LEN];
- /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
- u8 mac_local[8];
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
- u32 feature_flags; /* Feature_Flags. */
-#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
-#define FEATURE_ETH_LSO_MASK 0x02
-#define FEATURE_ETH_BOOTMODE_MASK 0x1C
-#define FEATURE_ETH_BOOTMODE_SHIFT 2
-#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
-#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
-#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
-#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
-#define FEATURE_ETH_TOE_MASK 0x20
- u32 lso_max_size; /* LSO MaxOffloadSize. */
- u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
- /* Num Offloaded Connections TCP_IPv4. */
- u32 ipv4_ofld_cnt;
- /* Num Offloaded Connections TCP_IPv6. */
- u32 ipv6_ofld_cnt;
- u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
- u32 txq_size; /* TX Descriptors Queue Size */
- u32 rxq_size; /* RX Descriptors Queue Size */
- /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 txq_avg_depth;
- /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 rxq_avg_depth;
- /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
- u32 iov_offload;
- /* Number of NetQueue/VMQ Config'd. */
- u32 netq_cnt;
- u32 vf_cnt; /* Num VF assigned to this PF. */
-};
-
-/* Per PCI Function FCOE Statistics required from the driver */
-struct fcoe_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u32 txq_size; /* FCoE TX Descriptors Queue Size. */
- u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
- /* FCoE TX Descriptor Queue Avg Depth. */
- u32 txq_avg_depth;
- /* FCoE RX Descriptors Queue Avg Depth. */
- u32 rxq_avg_depth;
- u32 rx_frames_lo; /* FCoE RX Frames received. */
- u32 rx_frames_hi; /* FCoE RX Frames received. */
- u32 rx_bytes_lo; /* FCoE RX Bytes received. */
- u32 rx_bytes_hi; /* FCoE RX Bytes received. */
- u32 tx_frames_lo; /* FCoE TX Frames sent. */
- u32 tx_frames_hi; /* FCoE TX Frames sent. */
- u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
- u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
-};
-
-/* Per PCI Function iSCSI Statistics required from the driver*/
-struct iscsi_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
- u8 ww_port_name[64]; /* iSCSI World wide port name */
- u8 boot_target_name[64];/* iSCSI Boot Target Name. */
- u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
- u32 boot_target_portal; /* iSCSI Boot Target Portal. */
- u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
- u32 max_frame_size; /* Max Frame Size. bytes */
- u32 txq_size; /* PDU TX Descriptors Queue Size. */
- u32 rxq_size; /* PDU RX Descriptors Queue Size. */
- u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
- u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
- u32 rx_pdus_lo; /* iSCSI PDUs received. */
- u32 rx_pdus_hi; /* iSCSI PDUs received. */
- u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
- u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
- u32 tx_pdus_lo; /* iSCSI PDUs sent. */
- u32 tx_pdus_hi; /* iSCSI PDUs sent. */
- u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
- u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
- u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
- * 9 nibbles, the position of each nibble
- * represents the C-PCP value, the value
- * of the nibble = S-PCP value.
- */
-};
-
-union drv_info_to_mcp {
- struct eth_stats_info ether_stat;
- struct fcoe_stats_info fcoe_stat;
- struct iscsi_stats_info iscsi_stat;
-};
/* stats collected for afex.
* NOTE: structure is exactly as expected to be received by the switch.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a3fb721..e04b282 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -40,6 +40,7 @@
#define I2C_BSC0 0
#define I2C_BSC1 1
#define I2C_WA_RETRY_CNT 3
+#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1)
#define MCPR_IMC_COMMAND_READ_OP 1
#define MCPR_IMC_COMMAND_WRITE_OP 2
@@ -284,7 +285,6 @@
#define ETS_E3B0_PBF_MIN_W_VAL (10000)
#define MAX_PACKET_SIZE (9700)
-#define WC_UC_TIMEOUT 100
#define MAX_KR_LINK_RETRY 4
/**********************************************************/
@@ -1305,6 +1305,94 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
return 0;
}
+
+/******************************************************************/
+/* EEE section */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (REG_RD(bp, params->shmem2_base) <=
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
+
+ return 1;
+}
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+ switch (nvram_mode) {
+ case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+ *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+ *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+ *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+ break;
+ default:
+ *idle_timer = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+ switch (idle_timer) {
+ case EEE_MODE_NVRAM_BALANCED_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+ break;
+ case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+ break;
+ case EEE_MODE_NVRAM_LATENCY_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+ break;
+ default:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+ break;
+ }
+
+ return 0;
+}
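
bnx2x_eee_nvram_to_time() and bnx2x_eee_time_to_nvram() are simple inverse lookups between the three named NVRAM power modes and their idle-timer values. A hypothetical sanity check, not part of the patch (it assumes the EEE_MODE_NVRAM_*_TIME constants from bnx2x_link.h are distinct and non-zero, and the ARRAY_SIZE() macro from <linux/kernel.h>):

static bool bnx2x_eee_mode_tables_consistent(void)
{
	static const u32 modes[] = {
		PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED,
		PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE,
		PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY,
	};
	u32 timer, mode;
	int i;

	for (i = 0; i < ARRAY_SIZE(modes); i++) {
		/* mode -> timer -> mode must come back unchanged */
		bnx2x_eee_nvram_to_time(modes[i], &timer);
		bnx2x_eee_time_to_nvram(timer, &mode);
		if (mode != modes[i])
			return false;
	}
	return true;
}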
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+ u32 eee_mode, eee_idle;
+ struct bnx2x *bp = params->bp;
+
+ if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* time value in eee_mode --> used directly*/
+ eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+ } else {
+ /* hsi value in eee_mode --> time */
+ if (bnx2x_eee_nvram_to_time(params->eee_mode &
+ EEE_MODE_NVRAM_MASK,
+ &eee_idle))
+ return 0;
+ }
+ } else {
+ /* hsi values in nvram --> time*/
+ eee_mode = ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+ if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+ return 0;
+ }
+
+ return eee_idle;
+}
+
+
/******************************************************************/
/* PFC section */
/******************************************************************/
@@ -1539,7 +1627,7 @@ static void bnx2x_umac_enable(struct link_params *params,
/* Reset UMAC */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
@@ -1641,7 +1729,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Hard reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC);
@@ -1671,7 +1759,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Soft reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
@@ -1729,6 +1817,14 @@ static int bnx2x_xmac_enable(struct link_params *params,
/* update PFC */
bnx2x_update_pfc_xmac(params, vars, 0);
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
+ } else {
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
+ }
+
/* Enable TX and RX */
val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
@@ -1784,11 +1880,6 @@ static int bnx2x_emac_enable(struct link_params *params,
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
- if (CHIP_REV_IS_SLOW(bp)) {
- /* config GMII mode */
- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
- } else { /* ASIC */
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
@@ -1811,7 +1902,6 @@ static int bnx2x_emac_enable(struct link_params *params,
} else
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_FLOW_EN);
- }
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -1850,23 +1940,23 @@ static int bnx2x_emac_enable(struct link_params *params,
val &= ~0x810;
EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
- /* enable emac */
+ /* Enable emac */
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
- /* enable emac for jumbo packets */
+ /* Enable emac for jumbo packets */
EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
- /* strip CRC */
+ /* Strip CRC */
REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
- /* disable the NIG in/out to the bmac */
+ /* Disable the NIG in/out to the bmac */
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
- /* enable the NIG in/out to the emac */
+ /* Enable the NIG in/out to the emac */
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
val = 0;
if ((params->feature_config_flags &
@@ -1901,7 +1991,7 @@ static void bnx2x_update_pfc_bmac1(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
- /* tx control */
+ /* TX control */
val = 0xc0;
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) &&
@@ -1961,7 +2051,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] &= ~(1<<2);
} else {
DP(NETIF_MSG_LINK, "PFC is disabled\n");
- /* disable PFC RX & TX & STATS and set 8 COS */
+ /* Disable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x8;
wb_data[1] = 0;
}
@@ -2055,7 +2145,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2083,7 +2173,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2113,7 +2203,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2131,7 +2221,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2188,7 +2278,7 @@ static void bnx2x_pfc_brb_get_e3b0_config_params(
if (pfc_params->cos0_pauseable !=
pfc_params->cos1_pauseable) {
- /* nonpauseable= Lossy + pauseable = Lossless*/
+ /* Nonpauseable= Lossy + pauseable = Lossless*/
e3b0_val->lb_guarantied =
PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
e3b0_val->mac_0_class_t_guarantied =
@@ -2387,9 +2477,9 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
 * This function is needed because NIG ARB_CREDIT_WEIGHT_X are
 * not contiguous, and ARB_CREDIT_WEIGHT_0 + offset is suitable.
******************************************************************************/
-int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
- u8 cos_entry,
- u32 priority_mask, u8 port)
+static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+ u8 cos_entry,
+ u32 priority_mask, u8 port)
{
u32 nig_reg_rx_priority_mask_add = 0;
@@ -2439,6 +2529,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
port_mb[params->port].link_status), link_status);
}
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (bnx2x_eee_has_cap(params))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]), eee_status);
+}
+
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2506,7 +2606,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
- /* output enable for RX_XCM # IF */
+ /* Output enable for RX_XCM # IF */
REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
NIG_REG_XCM0_OUT_EN, xcm_out_en);
@@ -2555,10 +2655,10 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_mng(params, vars->link_status);
- /* update NIG params */
+ /* Update NIG params */
bnx2x_update_pfc_nig(params, vars, pfc_params);
- /* update BRB params */
+ /* Update BRB params */
bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
if (bnx2x_status)
return bnx2x_status;
@@ -2613,7 +2713,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
wb_data, 2);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2622,7 +2722,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
- /* mac control */
+ /* MAC control */
val = 0x3;
if (is_lb) {
val |= 0x4;
@@ -2632,24 +2732,24 @@ static int bnx2x_bmac1_enable(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
- /* set rx mtu */
+ /* Set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
- /* set tx mtu */
+ /* Set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
- /* configure safc */
+ /* Configure SAFC */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
@@ -2683,7 +2783,7 @@ static int bnx2x_bmac2_enable(struct link_params *params,
udelay(30);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2702,18 +2802,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
wb_data, 2);
udelay(30);
- /* set rx mtu */
+ /* Set RX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set tx mtu */
+ /* Set TX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
@@ -2731,15 +2831,15 @@ static int bnx2x_bmac_enable(struct link_params *params,
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
- /* reset and unreset the BigMac */
+ /* Reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- msleep(1);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- /* enable access for bmac registers */
+ /* Enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
/* Enable BMAC according to BMAC type*/
@@ -2797,7 +2897,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
BIGMAC_REGISTER_BMAC_CONTROL,
wb_data, 2);
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -2809,17 +2909,16 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
u32 init_crd, crd;
u32 count = 1000;
- /* disable port */
+ /* Disable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
- /* wait for init credit */
+ /* Wait for init credit */
init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
while ((init_crd != crd) && count) {
- msleep(5);
-
+ usleep_range(5000, 10000);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
count--;
}
@@ -2836,18 +2935,18 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
line_speed == SPEED_1000 ||
line_speed == SPEED_2500) {
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
- /* update init credit */
+ /* Update init credit */
init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
ETH_OVREHEAD)/16;
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
- /* update init credit */
+ /* Update init credit */
switch (line_speed) {
case SPEED_10000:
init_crd = thresh + 553 - 22;
@@ -2862,12 +2961,12 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
line_speed, init_crd);
- /* probe the credit changes */
+ /* Probe the credit changes */
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
- msleep(5);
+ usleep_range(5000, 10000);
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
- /* enable port */
+ /* Enable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
return 0;
}
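Editor's note: a recurring change in this patch is replacing short msleep() calls with usleep_range(min, max), which takes microseconds and gives the timer code a window for wakeup coalescing instead of a jiffy-granular sleep. The credit-poll loop above keeps the usual bounded-retry shape. Below is a portable userspace sketch of that shape; nanosleep() stands in for usleep_range() and fake_credit_reg() stands in for REG_RD(), so timings and values are illustrative only.

    #define _POSIX_C_SOURCE 199309L
    #include <stdio.h>
    #include <time.h>

    static unsigned int fake_credit_reg(void)
    {
        static int calls;
        return (++calls >= 3) ? 778u : 0u;   /* pretend the HW settles eventually */
    }

    /* Rough userspace stand-in for usleep_range(min_us, max_us). */
    static void sleep_range_us(long min_us, long max_us)
    {
        struct timespec ts = { 0, min_us * 1000L };
        (void)max_us;                        /* no slack hint outside the kernel */
        nanosleep(&ts, NULL);
    }

    int main(void)
    {
        unsigned int init_crd = 778, crd = 0;
        int count = 1000;

        while (crd != init_crd && count--) {
            sleep_range_us(5000, 10000);     /* the patch's replacement for msleep(5) */
            crd = fake_credit_reg();
        }
        printf("credit %s (crd=%u)\n",
               crd == init_crd ? "settled" : "timed out", crd);
        return 0;
    }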
@@ -2934,7 +3033,7 @@ static int bnx2x_cl22_write(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (reg << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -2970,7 +3069,7 @@ static int bnx2x_cl22_read(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (reg << 16) |
EMAC_MDIO_COMM_COMMAND_READ_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3008,7 +3107,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3029,7 +3128,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
*ret_val = 0;
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
val = ((phy->addr << 21) | (devad << 16) |
EMAC_MDIO_COMM_COMMAND_READ_45 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3077,7 +3176,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3097,7 +3196,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
tmp = ((phy->addr << 21) | (devad << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_45 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3187,23 +3286,23 @@ static int bnx2x_bsc_read(struct link_params *params,
xfer_cnt = 16 - lc_addr;
- /* enable the engine */
+ /* Enable the engine */
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
val |= MCPR_IMC_COMMAND_ENABLE;
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* program slave device ID */
+ /* Program slave device ID */
val = (sl_devid << 16) | sl_addr;
REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
- /* start xfer with 0 byte to update the address pointer ???*/
+ /* Start xfer with 0 byte to update the address pointer ???*/
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_WRITE_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3219,7 +3318,7 @@ static int bnx2x_bsc_read(struct link_params *params,
if (rc == -EFAULT)
return rc;
- /* start xfer with read op */
+ /* Start xfer with read op */
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_READ_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
@@ -3227,7 +3326,7 @@ static int bnx2x_bsc_read(struct link_params *params,
(xfer_cnt);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3330,7 +3429,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
port = port ^ 1;
lane = (port<<1) + path;
- } else { /* two port mode - no port swap */
+ } else { /* Two port mode - no port swap */
/* Figure out path swap value */
path_swap_ovr =
@@ -3408,7 +3507,7 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
val = SERDES_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3429,7 +3528,7 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
val = XGXS_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3521,7 +3620,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
{
u16 val;
struct bnx2x *bp = params->bp;
- /* read modify write pause advertizing */
+ /* Read modify write pause advertising */
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
@@ -3656,44 +3755,35 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 val16 = 0, lane, bam37 = 0;
- struct bnx2x *bp = params->bp;
+ u16 val16 = 0, lane, i;
+ struct bnx2x *bp = params->bp;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
+ /* Disable Autoneg: re-enable it after adv is done. */
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
+ };
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
/* Set to default registers that may be overriden by 10G force */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
- MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, 0x7415);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
- /* Disable Autoneg: re-enable it after adv is done. */
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
/* Check adding advertisement for 1G KX */
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u16 sd_digital;
+ u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
val16 |= (1<<5);
/* Enable CL37 1G Parallel Detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
- (sd_digital | 0x1));
-
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
DP(NETIF_MSG_LINK, "Advertize 1G\n");
}
if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3703,7 +3793,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
val16 |= (1<<7);
/* Enable 10G Parallel Detect */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
DP(NETIF_MSG_LINK, "Advertize 10G\n");
}
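Editor's note: the rewrite above is representative of a pattern applied in several functions in this patch. Long runs of individual bnx2x_cl45_write() calls are collapsed into a static bnx2x_reg_set table walked in a loop, with sizeof(reg_set)/sizeof(struct bnx2x_reg_set) as the element count (the same value the kernel's ARRAY_SIZE() macro yields). The sketch below shows the pattern standalone; the register addresses and the cl45_write() print stub are made up for illustration.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

    /* Shape implied by the patch; the real bnx2x_reg_set lives in the driver. */
    struct reg_set {
        uint8_t  devad;
        uint16_t reg;
        uint16_t val;
    };

    /* Print stub standing in for bnx2x_cl45_write(bp, phy, devad, reg, val). */
    static void cl45_write(uint8_t devad, uint16_t reg, uint16_t val)
    {
        printf("write devad=%u reg=0x%04x val=0x%04x\n",
               (unsigned)devad, (unsigned)reg, (unsigned)val);
    }

    int main(void)
    {
        static const struct reg_set reg_set[] = {
            { 0x3, 0x8301, 0x0007 },
            { 0x7, 0x8371, 0x0000 },
            { 0x3, 0x80f1, 0x00ff },
        };
        size_t i;

        for (i = 0; i < ARRAY_SIZE(reg_set); i++)
            cl45_write(reg_set[i].devad, reg_set[i].reg, reg_set[i].val);
        return 0;
    }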
@@ -3737,10 +3827,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
+ 1);
DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
}
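Editor's note: several hunks above and below replace an explicit read/modify/write pair with a single bnx2x_cl45_read_or_write() call. The helper's body is not part of this diff, but its call sites imply an OR-style read-modify-write. The sketch below shows that behaviour with stub MDIO accessors; the bp/phy plumbing and the register contents are elided as assumptions.

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fake_reg = 0x0040;      /* pretend PHY register contents */

    static void cl45_read(uint8_t devad, uint16_t reg, uint16_t *val)
    {
        (void)devad; (void)reg;
        *val = fake_reg;
    }

    static void cl45_write(uint8_t devad, uint16_t reg, uint16_t val)
    {
        (void)devad; (void)reg;
        fake_reg = val;
    }

    /* OR-style read-modify-write, matching what the bnx2x_cl45_read_or_write()
     * call sites in this patch rely on (the real helper also carries bp/phy). */
    static void cl45_read_or_write(uint8_t devad, uint16_t reg, uint16_t or_val)
    {
        uint16_t val;

        cl45_read(devad, reg, &val);
        cl45_write(devad, reg, val | or_val);
    }

    int main(void)
    {
        cl45_read_or_write(0x5, 0x8309, 0x0100);   /* set bit 8, keep the rest */
        printf("register now 0x%04x\n", (unsigned)fake_reg);
        return 0;
    }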
@@ -3754,11 +3843,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
}
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, &val16);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
/* Over 1G - AN local device user page 1 */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3775,50 +3861,35 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val;
-
- /* Disable Autoneg */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL3_UP1, 0x1);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
-
- /* Disable CL36 PCS Tx */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
-
- /* Double Wide Single Data Rate @ pll rate */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
-
- /* Leave cl72 training enable, needed for KR */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ u16 i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Disable Autoneg */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3f00},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
+ /* Disable CL36 PCS Tx */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
+ /* Double Wide Single Data Rate @ pll rate */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
+ /* Leave cl72 training enable, needed for KR */
+ {MDIO_PMA_DEVAD,
MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
- 0x2);
+ 0x2}
+ };
+
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
/* Leave CL72 enabled */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- val | 0x3800);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3800);
/* Set speed via PMA/PMD register */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
@@ -3839,7 +3910,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, 0xF9);
- /* set and clear loopback to cause a reset to 64/66 decoder */
+ /* Set and clear loopback to cause a reset to 64/66 decoder */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3854,16 +3925,12 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
/* Hold rxSeqStart */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
/* Hold tx_fifo_reset */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
/* Disable CL73 AN */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
@@ -3875,10 +3942,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
/* Disable 100FX Idle detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0080);
/* Set Block address to Remote PHY & Clear forced_speed[5] */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3939,16 +4004,20 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
tx_driver_val);
/* Enable fiber mode, enable and invert sig_det */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
/* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
+
+ /* Enable LPI pass through */
+ DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+ MDIO_WC_REG_EEE_COMBO_CONTROL0,
+ 0x7c);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
/* 10G XFI Full Duplex */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4138,40 +4207,35 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
u16 lane)
{
struct bnx2x *bp = params->bp;
- u16 val16;
-
+ u16 i;
+ static struct bnx2x_reg_set wc_regs[] = {
+ {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0x0195},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ 0x0007},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
+ };
/* Set XFI clock comp as default. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, (3<<13));
+
+ for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
+ wc_regs[i].val);
- bnx2x_warpcore_reset_lane(bp, phy, 1);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL1, 0x014a);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, 0x0800);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX_FIR_TAP, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
- bnx2x_warpcore_reset_lane(bp, phy, 0);
+
}
static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
@@ -4259,7 +4323,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
if (!vars->turn_to_run_wc_rt)
return;
- /* return if there is no link partner */
+ /* Return if there is no link partner */
if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
return;
@@ -4293,7 +4357,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_warpcore_reset_lane(bp, phy, 0);
- /* restart Autoneg */
+ /* Restart Autoneg */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
@@ -4310,6 +4374,23 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
} /*params->rx_tx_asic_rst*/
}
+static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ struct bnx2x *bp = params->bp;
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
+ SPEED_10000) &&
+ (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+ } else {
+ DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
+ }
+}
+
static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4370,19 +4451,11 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
break;
case PORT_HW_CFG_NET_SERDES_IF_SFI:
-
- bnx2x_warpcore_clear_regs(phy, params, lane);
- if (vars->line_speed == SPEED_10000) {
- DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
- bnx2x_warpcore_set_10G_XFI(phy, params, 0);
- } else if (vars->line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
- bnx2x_warpcore_set_sgmii_speed(
- phy, params, 1, 0);
- }
/* Issue Module detection */
if (bnx2x_is_sfp_module_plugged(phy, params))
bnx2x_sfp_module_detection(phy, params);
+
+ bnx2x_warpcore_config_sfi(phy, params);
break;
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
@@ -4499,12 +4572,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
/* Enable 1G MDIO (1-copy) */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- val16 | 0x10);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ 0x10);
/* Set 1G loopback based on lane (1-copy) */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4517,22 +4587,19 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
bnx2x_set_aer_mmd(params, phy);
} else {
/* 10G & 20G */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
- 0x4000);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x4000);
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
}
}
-void bnx2x_sync_link(struct link_params *params,
- struct link_vars *vars)
+
+static void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
@@ -4605,7 +4672,7 @@ void bnx2x_sync_link(struct link_params *params,
USES_WARPCORE(bp) &&
(vars->line_speed == SPEED_1000))
vars->phy_flags |= PHY_SGMII_FLAG;
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
if (link_10g_plus) {
@@ -4619,7 +4686,7 @@ void bnx2x_sync_link(struct link_params *params,
else
vars->mac_type = MAC_TYPE_EMAC;
}
- } else { /* link down */
+ } else { /* Link down */
DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0;
@@ -4628,10 +4695,12 @@ void bnx2x_sync_link(struct link_params *params,
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
+ vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
}
}
@@ -4697,7 +4766,7 @@ static void bnx2x_set_master_ln(struct link_params *params,
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- /* set the master_ln for AN */
+ /* Set the master_ln for AN */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
@@ -4720,7 +4789,7 @@ static int bnx2x_reset_unicore(struct link_params *params,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
- /* reset the unicore */
+ /* Reset the unicore */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4729,11 +4798,11 @@ static int bnx2x_reset_unicore(struct link_params *params,
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
- /* wait for the reset to self clear */
+ /* Wait for the reset to self clear */
for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
udelay(5);
- /* the reset erased the previous bank value */
+ /* The reset erased the previous bank value */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4951,7 +5020,7 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
-/* program SerDes, forced speed */
+/* Program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4959,7 +5028,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 reg_val;
- /* program duplex, disable autoneg and sgmii*/
+ /* Program duplex, disable autoneg and sgmii*/
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -4978,7 +5047,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
- /* clearing the speed value before setting the right speed */
+ /* Clearing the speed value before setting the right speed */
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
@@ -5007,7 +5076,7 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
- /* set extended capabilities */
+ /* Set extended capabilities */
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
@@ -5027,7 +5096,7 @@ static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 val;
- /* for AN, we are always publishing full duplex */
+ /* For AN, we are always publishing full duplex */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
@@ -5089,14 +5158,14 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 control1;
- /* in SGMII mode, the unicore is always slave */
+ /* In SGMII mode, the unicore is always slave */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
&control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
- /* set sgmii mode (and not fiber) */
+ /* Set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
@@ -5105,9 +5174,9 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
control1);
- /* if forced speed */
+ /* If forced speed */
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
- /* set speed, disable autoneg */
+ /* Set speed, disable autoneg */
u16 mii_control;
CL22_RD_OVER_CL45(bp, phy,
@@ -5128,16 +5197,16 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
break;
case SPEED_10:
- /* there is nothing to set for 10M */
+ /* There is nothing to set for 10M */
break;
default:
- /* invalid speed for SGMII */
+ /* Invalid speed for SGMII */
DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
vars->line_speed);
break;
}
- /* setting the full duplex */
+ /* Setting the full duplex */
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
@@ -5147,7 +5216,7 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
mii_control);
} else { /* AN mode */
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, 0);
}
}
@@ -5243,7 +5312,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* resolve from gp_status in case of AN complete and not sgmii */
+ /* Resolve from gp_status in case of AN complete and not sgmii */
if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
/* Update the advertised flow-controled of LD/LP in AN */
if (phy->req_line_speed == SPEED_AUTO_NEG)
@@ -5467,7 +5536,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
bnx2x_xgxs_an_resolve(phy, params, vars,
gp_status);
}
- } else { /* link_down */
+ } else { /* Link_down */
if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
SINGLE_MEDIA_DIRECT(params)) {
/* Check signal is detected */
@@ -5616,12 +5685,12 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
u16 tx_driver;
u16 bank;
- /* read precomp */
+ /* Read precomp */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP2, &lp_up2);
- /* bits [10:7] at lp_up2, positioned at [15:12] */
+ /* Bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
@@ -5635,7 +5704,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
bank,
MDIO_TX0_TX_DRIVER, &tx_driver);
- /* replace tx_driver bits [15:12] */
+ /* Replace tx_driver bits [15:12] */
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
@@ -5731,16 +5800,16 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
bnx2x_set_preemphasis(phy, params);
- /* forced speed requested? */
+ /* Forced speed requested? */
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
- /* disable autoneg */
+ /* Disable autoneg */
bnx2x_set_autoneg(phy, params, vars, 0);
- /* program speed and duplex */
+ /* Program speed and duplex */
bnx2x_program_serdes(phy, params, vars);
} else { /* AN_mode */
@@ -5749,14 +5818,14 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
/* AN enabled */
bnx2x_set_brcm_cl37_advertisement(phy, params);
- /* program duplex & pause advertisement (for aneg) */
+ /* Program duplex & pause advertisement (for aneg) */
bnx2x_set_ieee_aneg_advertisement(phy, params,
vars->ieee_fc);
- /* enable autoneg */
+ /* Enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, enable_cl73);
}
@@ -5792,12 +5861,12 @@ static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
bnx2x_set_master_ln(params, phy);
rc = bnx2x_reset_unicore(params, phy, 0);
- /* reset the SerDes and wait for reset bit return low */
- if (rc != 0)
+ /* Reset the SerDes and wait for reset bit return low */
+ if (rc)
return rc;
bnx2x_set_aer_mmd(params, phy);
- /* setting the masterLn_def again after the reset */
+ /* Setting the masterLn_def again after the reset */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
bnx2x_set_master_ln(params, phy);
bnx2x_set_swap_lanes(params, phy);
@@ -5822,7 +5891,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &ctrl);
if (!(ctrl & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (cnt == 1000)
@@ -6053,7 +6122,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
if (!CHIP_IS_E3(bp)) {
- /* change the uni_phy_addr in the nig */
+ /* Change the uni_phy_addr in the nig */
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
port*0x18));
@@ -6073,11 +6142,11 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
(MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
0x6041);
msleep(200);
- /* set aer mmd back */
+ /* Set aer mmd back */
bnx2x_set_aer_mmd(params, phy);
if (!CHIP_IS_E3(bp)) {
- /* and md_devad */
+ /* And md_devad */
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
md_devad);
}
@@ -6274,7 +6343,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
- /* link is up only if both local phy and external phy are up */
+ /* Link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
}
@@ -6295,7 +6364,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
serdes_phy_type = ((params->phy[phy_index].media_type ==
- ETH_PHY_SFP_FIBER) ||
+ ETH_PHY_SFPP_10G_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_1G_FIBER) ||
(params->phy[phy_index].media_type ==
ETH_PHY_XFP_FIBER) ||
(params->phy[phy_index].media_type ==
@@ -6396,7 +6467,7 @@ static int bnx2x_link_initialize(struct link_params *params,
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
- /* reset the SerDes/XGXS */
+ /* Reset the SerDes/XGXS */
REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
(0x1ff << (params->port*16)));
}
@@ -6429,10 +6500,10 @@ static int bnx2x_update_link_down(struct link_params *params,
DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
- /* update shared memory */
+ /* Update shared memory */
vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
LINK_STATUS_LINK_UP |
LINK_STATUS_PHYSICAL_LINK_FLAG |
@@ -6445,15 +6516,15 @@ static int bnx2x_update_link_down(struct link_params *params,
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
- /* reset BigMac/Xmac */
+ usleep_range(10000, 20000);
+ /* Reset BigMac/Xmac */
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp)) {
bnx2x_bmac_rx_disable(bp, params->port);
@@ -6462,6 +6533,16 @@ static int bnx2x_update_link_down(struct link_params *params,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
}
if (CHIP_IS_E3(bp)) {
+ /* Prevent LPI Generation by chip */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
+ 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
+ 0);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
@@ -6501,6 +6582,16 @@ static int bnx2x_update_link_up(struct link_params *params,
bnx2x_umac_enable(params, vars, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
+
+ if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
+ (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
+ DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
+ (params->port << 2), 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
+ (params->port << 2), 0xfc20);
+ }
}
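Editor's note: together with the link-down hunk earlier (which zeroes MISC_REG_CPMU_LP_FW_ENABLE_P0, MISC_REG_CPMU_LP_DR_ENABLE and MISC_REG_CPMU_LP_MASK_ENT_P0 and clears the EEE status bits), this gives a symmetric arm/disarm of LPI generation keyed on SHMEM_EEE_ACTIVE_BIT and SHMEM_EEE_LPI_REQUESTED_BIT. The sketch below compresses the two paths side by side; the bit values, register names and reg_wr() stub are placeholders, not the driver's definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define EEE_ACTIVE_BIT        (1u << 0)   /* placeholders for SHMEM_EEE_* bits */
    #define EEE_LPI_REQUESTED_BIT (1u << 1)

    static void reg_wr(const char *name, uint32_t val)
    {
        printf("%s <- 0x%x\n", name, val);    /* stand-in for REG_WR() */
    }

    static void lpi_arm(uint32_t eee_status)   /* link-up path */
    {
        if ((eee_status & EEE_ACTIVE_BIT) && (eee_status & EEE_LPI_REQUESTED_BIT)) {
            reg_wr("CPMU_LP_FW_ENABLE_P0 + port*4", 1);
            reg_wr("CPMU_LP_DR_ENABLE", 1);
            reg_wr("CPMU_LP_MASK_ENT_P0 + port*4", 0xfc20);
        }
    }

    static void lpi_disarm(uint32_t *eee_status)   /* link-down path */
    {
        reg_wr("CPMU_LP_FW_ENABLE_P0 + port*4", 0);
        reg_wr("CPMU_LP_DR_ENABLE", 0);
        reg_wr("CPMU_LP_MASK_ENT_P0 + port*4", 0);
        *eee_status &= ~EEE_ACTIVE_BIT;       /* link-down also clears active/LP adv */
    }

    int main(void)
    {
        uint32_t eee_status = EEE_ACTIVE_BIT | EEE_LPI_REQUESTED_BIT;

        lpi_arm(eee_status);
        lpi_disarm(&eee_status);
        return 0;
    }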
if ((CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))) {
@@ -6533,12 +6624,12 @@ static int bnx2x_update_link_up(struct link_params *params,
rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
vars->line_speed);
- /* disable drain */
+ /* Disable drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
- /* update shared memory */
+ /* Update shared memory */
bnx2x_update_mng(params, vars->link_status);
-
+ bnx2x_update_mng_eee(params, vars->eee_status);
/* Check remote fault */
for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
@@ -6582,6 +6673,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
phy_vars[phy_index].phy_link_up = 0;
phy_vars[phy_index].link_up = 0;
phy_vars[phy_index].fault_detected = 0;
+ /* different consideration, since vars holds inner state */
+ phy_vars[phy_index].eee_status = vars->eee_status;
}
if (USES_WARPCORE(bp))
@@ -6602,7 +6695,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -6711,6 +6804,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars->link_status |= LINK_STATUS_SERDES_LINK;
else
vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+
+ vars->eee_status = phy_vars[active_external_phy].eee_status;
+
DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
active_external_phy);
}
@@ -6744,11 +6840,11 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
} else if (prev_line_speed != vars->line_speed) {
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
0);
- msleep(1);
+ usleep_range(1000, 2000);
}
}
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
bnx2x_link_int_ack(params, vars, link_10g_plus);
@@ -6814,7 +6910,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
@@ -6911,7 +7007,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_REG_GEN_CTRL,
0x0001);
- /* ucode reboot and rst */
+ /* Ucode reboot and rst */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
@@ -6955,7 +7051,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
- msleep(1);
+ usleep_range(1000, 2000);
} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
((fw_msgout & 0xff) != 0x03 && (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7049,11 +7145,11 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
"XAUI workaround has completed\n");
return 0;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
break;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
return -EINVAL;
@@ -7127,7 +7223,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
bnx2x_cl45_write(bp, phy,
@@ -7275,7 +7371,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
- /* clear the interrupt LASI status register */
+ /* Clear the interrupt LASI status register */
bnx2x_cl45_read(bp, phy,
MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
bnx2x_cl45_read(bp, phy,
@@ -7600,7 +7696,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
u16 i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7654,11 +7750,33 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
}
+static void bnx2x_warpcore_power_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ u32 pin_cfg;
+ struct bnx2x *bp = params->bp;
+
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+ PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+ power, pin_cfg);
+ /* Low ==> corresponding SFP+ module is powered
+ * high ==> the SFP+ module is powered down
+ */
+ bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
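Editor's note: bnx2x_warpcore_power_module() is moved ahead of its new caller; the warpcore EEPROM read loop below now power-cycles the module once, after I2C_WA_PWR_ITER failed attempts, before continuing to retry. The sketch shows that retry-with-one-power-cycle shape standalone; the constants, eeprom_read() behaviour and printed messages are illustrative only.

    #include <stdbool.h>
    #include <stdio.h>

    #define I2C_RETRY_CNT   10   /* stands in for I2C_WA_RETRY_CNT */
    #define I2C_PWR_ITER     5   /* stands in for I2C_WA_PWR_ITER  */

    static void module_power(bool on)
    {
        printf("SFP+ power %s\n", on ? "on" : "off");
    }

    /* Pretend the EEPROM read only starts working after a power cycle. */
    static int eeprom_read(int attempt)
    {
        return attempt > I2C_PWR_ITER ? 0 : -1;
    }

    int main(void)
    {
        int rc, cnt = 0;

        do {
            if (cnt == I2C_PWR_ITER) {   /* soft retries exhausted: cycle power */
                module_power(false);
                /* the patch sleeps ~1ms here before powering back up */
                module_power(true);
            }
            rc = eeprom_read(cnt);
        } while (rc != 0 && ++cnt < I2C_RETRY_CNT);

        printf("read %s after %d attempt(s)\n", rc ? "failed" : "ok", cnt + 1);
        return 0;
    }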
static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
u16 addr, u8 byte_cnt,
@@ -7669,7 +7787,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
u32 data_array[4];
u16 addr32;
struct bnx2x *bp = params->bp;
- if (byte_cnt > 16) {
+
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 16 bytes\n");
return -EINVAL;
@@ -7678,6 +7797,12 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* 4 byte aligned address */
addr32 = addr & (~0x3);
do {
+ if (cnt == I2C_WA_PWR_ITER) {
+ bnx2x_warpcore_power_module(params, phy, 0);
+ /* Note that 100us are not enough here */
+ usleep_range(1000, 1000);
+ bnx2x_warpcore_power_module(params, phy, 1);
+ }
rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -7699,7 +7824,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val, i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7736,7 +7861,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Wait appropriate time for two-wire command to finish before
* polling the status register
*/
- msleep(1);
+ usleep_range(1000, 2000);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
@@ -7772,7 +7897,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
@@ -7782,7 +7907,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
{
- int rc = -EINVAL;
+ int rc = -EOPNOTSUPP;
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -7807,7 +7932,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u32 sync_offset = 0, phy_idx, media_types;
- u8 val, check_limiting_mode = 0;
+ u8 val[2], check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
phy->media_type = ETH_PHY_UNSPECIFIED;
@@ -7815,13 +7940,13 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_CON_TYPE_ADDR,
- 1,
- &val) != 0) {
+ 2,
+ (u8 *)val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
- switch (val) {
+ switch (val[0]) {
case SFP_EEPROM_CON_TYPE_VAL_COPPER:
{
u8 copper_module_type;
@@ -7859,13 +7984,29 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
- phy->media_type = ETH_PHY_SFP_FIBER;
- DP(NETIF_MSG_LINK, "Optic module detected\n");
check_limiting_mode = 1;
+ if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
+ SFP_EEPROM_COMP_CODE_LR_MASK |
+ SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+ DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ phy->media_type = ETH_PHY_SFP_1G_FIBER;
+ phy->req_line_speed = SPEED_1000;
+ } else {
+ int idx, cfg_idx = 0;
+ DP(NETIF_MSG_LINK, "10G Optic module detected\n");
+ for (idx = INT_PHY; idx < MAX_PHYS; idx++) {
+ if (params->phy[idx].type == phy->type) {
+ cfg_idx = LINK_CONFIG_IDX(idx);
+ break;
+ }
+ }
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
+ phy->req_line_speed = params->req_line_speed[cfg_idx];
+ }
break;
default:
DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
- val);
+ val[0]);
return -EINVAL;
}
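Editor's note: the connector-type read above now fetches two EEPROM bytes, so val[1] carries the module's 10G Ethernet compliance codes. If none of the SR/LR/LRM bits is set the module is treated as a 1G optic (ETH_PHY_SFP_1G_FIBER, req_line_speed forced to 1G); otherwise it is a 10G optic that keeps the requested speed of the matching config index. The tiny classifier below illustrates the test; the bit positions are assumptions, the driver uses the SFP_EEPROM_COMP_CODE_{SR,LR,LRM}_MASK constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed bit positions, for illustration only. */
    #define COMP_CODE_SR_MASK    (1 << 4)
    #define COMP_CODE_LR_MASK    (1 << 5)
    #define COMP_CODE_LRM_MASK   (1 << 6)

    static const char *classify_lc_module(uint8_t comp_code)
    {
        if ((comp_code & (COMP_CODE_SR_MASK |
                          COMP_CODE_LR_MASK |
                          COMP_CODE_LRM_MASK)) == 0)
            return "1G optic: force 1G";
        return "10G optic: keep requested speed";
    }

    int main(void)
    {
        printf("comp 0x00 -> %s\n", classify_lc_module(0x00));
        printf("comp 0x10 -> %s\n", classify_lc_module(0x10));
        return 0;
    }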
sync_offset = params->shmem_base +
@@ -7951,7 +8092,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
return 0;
}
- /* format the warning message */
+ /* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_VENDOR_NAME_ADDR,
@@ -7997,7 +8138,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
timeout * 5);
return 0;
}
- msleep(5);
+ usleep_range(5000, 10000);
}
return -EINVAL;
}
@@ -8200,29 +8341,6 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
}
-static void bnx2x_warpcore_power_module(struct link_params *params,
- struct bnx2x_phy *phy,
- u8 power)
-{
- u32 pin_cfg;
- struct bnx2x *bp = params->bp;
-
- pin_cfg = (REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
- PORT_HW_CFG_E3_PWR_DIS_MASK) >>
- PORT_HW_CFG_E3_PWR_DIS_SHIFT;
-
- if (pin_cfg == PIN_CFG_NA)
- return;
- DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
- power, pin_cfg);
- /* Low ==> corresponding SFP+ module is powered
- * high ==> the SFP+ module is powered down
- */
- bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
-}
-
static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -8332,7 +8450,7 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
- /* check SFP+ module compatibility */
+ /* Check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
rc = -EINVAL;
/* Turn on fault module-detected led */
@@ -8395,14 +8513,34 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
/* Call the handling function in case module is detected */
if (gpio_val == 0) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_aer_mmd(params, phy);
+
bnx2x_power_sfp_module(params, phy, 1);
bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
gpio_port);
- if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
bnx2x_sfp_module_detection(phy, params);
- else
+ if (CHIP_IS_E3(bp)) {
+ u16 rx_tx_in_reset;
+ /* In case WC is out of reset, reconfigure the
+ * link speed while taking into account 1G
+ * module limitation.
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6,
+ &rx_tx_in_reset);
+ if (!rx_tx_in_reset) {
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_config_sfi(phy, params);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ }
+ }
+ } else {
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ }
} else {
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -8463,7 +8601,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
MDIO_PMA_LASI_TXCTRL);
- /* clear LASI indication*/
+ /* Clear LASI indication*/
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
bnx2x_cl45_read(bp, phy,
@@ -8531,7 +8669,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
if (val)
break;
- msleep(10);
+ usleep_range(10000, 20000);
}
DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
if ((params->feature_config_flags &
@@ -8660,7 +8798,7 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 150ms for microcode load */
+ /* Wait for 150ms for microcode load */
msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
@@ -8854,6 +8992,63 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
+static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 tmp1, val;
+ /* Set option 1G speed */
+ if ((phy->req_line_speed == SPEED_1000) ||
+ (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /* Power down the XAUI until link is up in case of dual-media
+ * and 1G
+ */
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /* Since the 8727 has only single reset pin, need to set the 10G
+ * registers although it is default
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
+}
+
static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -8871,7 +9066,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
lasi_ctrl_val = 0x0006;
DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
rx_alarm_ctrl_val);
@@ -8923,56 +9118,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
- /* Set option 1G speed */
- if (phy->req_line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G force\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
- /* Power down the XAUI until link is up in case of dual-media
- * and 1G
- */
- if (DUAL_MEDIA(params)) {
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, &val);
- val |= (3<<10);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, val);
- }
- } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
-
- DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
- } else {
- /* Since the 8727 has only single reset pin, need to set the 10G
- * registers although it is default
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
- 0x0020);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
- 0x0008);
- }
-
+ bnx2x_8727_config_speed(phy, params);
/* Set 2-wire transfer rate of SFP+ module EEPROM
* to 100Khz since some DACs(direct attached cables) do
* not work at 400Khz.
@@ -9099,6 +9245,9 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
bnx2x_sfp_module_detection(phy, params);
else
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+
+ /* Reconfigure link speed based on module type limitations */
+ bnx2x_8727_config_speed(phy, params);
}
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
@@ -9579,9 +9728,9 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
u16 fw_cmd,
- u16 cmd_args[])
+ u16 cmd_args[], int argc)
{
- u32 idx;
+ int idx;
u16 val;
struct bnx2x *bp = params->bp;
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
@@ -9593,7 +9742,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
MDIO_84833_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (idx >= PHY84833_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9601,7 +9750,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
}
/* Prepare argument(s) and issue command */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
cmd_args[idx]);
@@ -9614,7 +9763,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if ((idx >= PHY84833_CMDHDLR_WAIT) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9622,7 +9771,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
return -EINVAL;
}
/* Gather returning data */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
&cmd_args[idx]);
@@ -9656,7 +9805,7 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
data[1] = (u16)pair_swap;
status = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_PAIR_SWAP, data);
+ PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
if (status == 0)
DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@ -9734,6 +9883,95 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
+static int bnx2x_8483x_eee_timers(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 eee_idle = 0, eee_mode;
+ struct bnx2x *bp = params->bp;
+
+ eee_idle = bnx2x_eee_calc_timer(params);
+
+ if (eee_idle) {
+ REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+ eee_idle);
+ } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+ (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+ (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+ DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+ return -EINVAL;
+ }
+
+ vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* eee_idle is in 1-usec units --> eee_status field is in 16-usec units */
+ eee_idle >>= 4;
+ vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+ SHMEM_EEE_TIME_OUTPUT_BIT;
+ } else {
+ if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+ return -EINVAL;
+ vars->eee_status |= eee_mode;
+ }
+
+ return 0;
+}
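/* Editor's note (worked example, not part of the patch): with
 * EEE_MODE_OUTPUT_TIME set, an eee_idle of 0x50 (80 usec) is shifted right
 * by 4 and stored as 0x5, i.e. the SHMEM_EEE_TIMER_MASK field carries the
 * idle time in 16-usec units, together with SHMEM_EEE_TIME_OUTPUT_BIT.
 */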
+
+static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 0;
+
+ DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
+
+ /* Make Certain LPI is disabled */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+
+ /* Prevent Phy from working in EEE and advertising it */
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE disable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+ return 0;
+}
+
+static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 1;
+
+ DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE enable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
+
+ /* Mask events preventing LPI generation */
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+ vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+ return 0;
+}
+
#define PHY84833_CONSTANT_LATENCY 1193
static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_params *params,
@@ -9746,9 +9984,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
- msleep(1);
+ usleep_range(1000, 2000);
- if (!(CHIP_IS_E1(bp)))
+ if (!(CHIP_IS_E1x(bp)))
port = BP_PATH(bp);
else
port = params->port;
@@ -9833,8 +10071,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args);
- if (rc != 0)
+ PHY84833_CMD_SET_EEE_MODE, cmd_args,
+ PHY84833_CMDHDLR_MAX_ARGS);
+ if (rc)
DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
if (initialize)
@@ -9858,6 +10097,48 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_CTL_REG_84823_USER_CTRL_REG, val);
}
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_FW_REV, &val);
+
+ /* Configure EEE support */
+ if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
+ phy->flags |= FLAGS_EEE_10GBT;
+ vars->eee_status |= SHMEM_EEE_10G_ADV <<
+ SHMEM_EEE_SUPPORTED_SHIFT;
+ /* Propagate params' bits --> vars (for migration exposure) */
+ if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+ vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+ if (params->eee_mode & EEE_MODE_ADV_LPI)
+ vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+ rc = bnx2x_8483x_eee_timers(params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+ bnx2x_8483x_disable_eee(phy, params, vars);
+ return rc;
+ }
+
+ if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
+ (params->eee_mode & EEE_MODE_ADV_LPI) &&
+ (bnx2x_eee_calc_timer(params) ||
+ !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
+ rc = bnx2x_8483x_enable_eee(phy, params, vars);
+ else
+ rc = bnx2x_8483x_disable_eee(phy, params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+ return rc;
+ }
+ } else {
+ phy->flags &= ~FLAGS_EEE_10GBT;
+ vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
+ }
+
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/* Bring PHY out of super isolate mode as the final step. */
bnx2x_cl45_read(bp, phy,
@@ -9912,17 +10193,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
legacy_status);
link_up = ((legacy_status & (1<<11)) == (1<<11));
- if (link_up) {
- legacy_speed = (legacy_status & (3<<9));
- if (legacy_speed == (0<<9))
- vars->line_speed = SPEED_10;
- else if (legacy_speed == (1<<9))
- vars->line_speed = SPEED_100;
- else if (legacy_speed == (2<<9))
- vars->line_speed = SPEED_1000;
- else /* Should not happen */
- vars->line_speed = 0;
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else { /* Should not happen: Treat as link down */
+ vars->line_speed = 0;
+ link_up = 0;
+ }
+ if (link_up) {
if (legacy_status & (1<<8))
vars->duplex = DUPLEX_FULL;
else
@@ -9950,7 +10233,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
}
}
if (link_up) {
- DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+ DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n",
vars->line_speed);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
@@ -9989,6 +10272,31 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
if (val & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ /* Determine if EEE was negotiated */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ u32 eee_shmem = 0;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_EEE_ADV, &val1);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_EEE_ADV, &val2);
+ if ((val1 & val2) & 0x8) {
+ DP(NETIF_MSG_LINK, "EEE negotiated\n");
+ vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+ }
+
+ if (val2 & 0x12)
+ eee_shmem |= SHMEM_EEE_100M_ADV;
+ if (val2 & 0x4)
+ eee_shmem |= SHMEM_EEE_1G_ADV;
+ if (val2 & 0x68)
+ eee_shmem |= SHMEM_EEE_10G_ADV;
+
+ vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+ vars->eee_status |= (eee_shmem <<
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ }
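/* Editor's note (assumption, not stated in the patch): MDIO_AN_REG_EEE_ADV
 * and MDIO_AN_REG_LP_EEE_ADV are assumed to follow the IEEE 802.3az EEE
 * advertisement layout (clause 45 registers 7.60/7.61), where bit 3 (0x8)
 * is 10GBASE-T EEE; the 0x12/0x4/0x68 masks then fold the remaining link
 * partner ability bits into the shmem 100M/1G/10G advertisement fields.
 */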
}
return link_up;
@@ -10267,7 +10575,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
u32 cfg_pin;
DP(NETIF_MSG_LINK, "54618SE cfg init\n");
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* This works with E3 only, no need to check the chip
* before determining the port.
@@ -10336,7 +10644,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
- /* read all advertisement */
+ /* Read all advertisement */
bnx2x_cl22_read(bp, phy,
0x09,
&an_1000_val);
@@ -10373,7 +10681,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* set 100 speed advertisement */
+ /* Set 100 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
@@ -10387,7 +10695,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 100M\n");
}
- /* set 10 speed advertisement */
+ /* Set 10 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
@@ -10526,7 +10834,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
/* Get speed operation status */
bnx2x_cl22_read(bp, phy,
- 0x19,
+ MDIO_REG_GPHY_AUX_STATUS,
&legacy_status);
DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
@@ -10753,7 +11061,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
val2, val1);
link_up = ((val1 & 4) == 4);
- /* if link is up print the AN outcome of the SFX7101 PHY */
+ /* If link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -10765,7 +11073,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
- /* read LP advertised speeds */
+ /* Read LP advertised speeds */
if (val2 & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
@@ -11084,7 +11392,7 @@ static struct bnx2x_phy phy_8706 = {
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_SFP_FIBER,
+ .media_type = ETH_PHY_SFPP_10G_FIBER,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
@@ -11243,7 +11551,8 @@ static struct bnx2x_phy phy_84833 = {
.def_md_devad = 0,
.flags = (FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL |
- FLAGS_TX_ERROR_CHECK),
+ FLAGS_TX_ERROR_CHECK |
+ FLAGS_EEE_10GBT),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11422,7 +11731,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause);
- phy->media_type = ETH_PHY_SFP_FIBER;
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
break;
case PORT_HW_CFG_NET_SERDES_IF_KR:
phy->media_type = ETH_PHY_KR;
@@ -11962,7 +12271,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
@@ -12011,6 +12320,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
break;
}
bnx2x_update_mng(params, vars->link_status);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
return 0;
}
@@ -12020,19 +12331,22 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
- /* disable attentions */
+ /* Disable attentions */
vars->link_status = 0;
bnx2x_update_mng(params, vars->link_status);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable nig egress interface */
+ /* Disable nig egress interface */
if (!CHIP_IS_E3(bp)) {
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
@@ -12045,15 +12359,15 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
+ usleep_range(10000, 20000);
/* The PHY reset is controlled by GPIO 1
* Hold it as vars low
*/
- /* clear link led */
+ /* Clear link led */
bnx2x_set_mdio_clk(bp, params->chip_id, port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
@@ -12083,9 +12397,9 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
- /* disable nig ingress interface */
+ /* Disable nig ingress interface */
if (!CHIP_IS_E3(bp)) {
- /* reset BigMac */
+ /* Reset BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
@@ -12142,7 +12456,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "populate_phy failed\n");
return -EINVAL;
}
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -12216,7 +12530,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
- msleep(15);
+ usleep_range(15000, 30000);
/* Read modify write the SPI-ROM version select register */
bnx2x_cl45_read(bp, phy_blk[port],
@@ -12248,7 +12562,7 @@ static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
bnx2x_ext_phy_hw_reset(bp, 0);
- msleep(5);
+ usleep_range(5000, 10000);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
@@ -12355,11 +12669,11 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Initiate PHY reset*/
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(5);
+ usleep_range(5000, 10000);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
@@ -12453,7 +12767,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &val);
if (!(val & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (cnt >= 1500) {
DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12543,7 +12857,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
}
- if (rc != 0)
+ if (rc)
netdev_err(bp->dev, "Warning: PHY was not initialized,"
" Port %d\n",
0);
@@ -12624,30 +12938,41 @@ static void bnx2x_check_over_curr(struct link_params *params,
vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
}
-static void bnx2x_analyze_link_error(struct link_params *params,
- struct link_vars *vars, u32 lss_status,
- u8 notify)
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
+static u8 bnx2x_analyze_link_error(struct link_params *params,
+ struct link_vars *vars, u32 status,
+ u32 phy_flag, u32 link_flag, u8 notify)
{
struct bnx2x *bp = params->bp;
/* Compare new value with previous value */
u8 led_mode;
- u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+ u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
- if ((lss_status ^ half_open_conn) == 0)
- return;
+ if ((status ^ old_status) == 0)
+ return 0;
/* If values differ */
- DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
- half_open_conn, lss_status);
+ switch (phy_flag) {
+ case PHY_HALF_OPEN_CONN_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
+ break;
+ case PHY_SFP_TX_FAULT_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+ }
+ DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
+ old_status, status);
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
- if (lss_status) {
- DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
+ if (status) {
vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_status |= link_flag;
vars->link_up = 0;
- vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags |= phy_flag;
/* activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
@@ -12656,10 +12981,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
*/
led_mode = LED_MODE_OFF;
} else {
- DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status &= ~link_flag;
vars->link_up = 1;
- vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags &= ~phy_flag;
led_mode = LED_MODE_OPER;
/* Clear nig drain */
@@ -12676,6 +13001,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
if (notify)
bnx2x_notify_link_changed(bp);
+
+ return 1;
}
/******************************************************************************
@@ -12717,7 +13044,9 @@ int bnx2x_check_half_open_conn(struct link_params *params,
if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
lss_status = 1;
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
/* Check E1X / E2 BMAC */
@@ -12734,11 +13063,55 @@ int bnx2x_check_half_open_conn(struct link_params *params,
REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
lss_status = (wb_data[0] > 0);
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
}
return 0;
}
+static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin, value = 0;
+ u8 led_change, port = params->port;
+
+ /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
+ cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_TX_FAULT_MASK) >>
+ PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
+ DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
+ return;
+ }
+
+ led_change = bnx2x_analyze_link_error(params, vars, value,
+ PHY_SFP_TX_FAULT_FLAG,
+ LINK_STATUS_SFP_TX_FAULT, 1);
+
+ if (led_change) {
+ /* Change TX_Fault led, set link status for further syncs */
+ u8 led_mode;
+
+ if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
+ led_mode = MISC_REGISTERS_GPIO_HIGH;
+ vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
+ } else {
+ led_mode = MISC_REGISTERS_GPIO_LOW;
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ }
+
+ /* If module is unapproved, led should be on regardless */
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+ DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
+ led_mode);
+ bnx2x_set_e3_module_fault_led(params, led_mode);
+ }
+ }
+}
void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
{
u16 phy_idx;
@@ -12757,7 +13130,26 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
bnx2x_check_over_curr(params, vars);
- bnx2x_warpcore_config_runtime(phy, params, vars);
+ if (vars->rx_tx_asic_rst)
+ bnx2x_warpcore_config_runtime(phy, params, vars);
+
+ if ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg))
+ & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SFI) {
+ if (bnx2x_is_sfp_module_plugged(phy, params)) {
+ bnx2x_sfp_tx_fault_detection(phy, params, vars);
+ } else if (vars->link_status &
+ LINK_STATUS_SFP_TX_FAULT) {
+ /* Clean trail, interrupt corrects the leds */
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ }
+ }
+
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ea4371f..51cac81 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,7 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
@@ -125,6 +126,11 @@ typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
struct link_params *params, u32 action);
+struct bnx2x_reg_set {
+ u8 devad;
+ u16 reg;
+ u16 val;
+};
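/* Editor's sketch (not part of this patch): a table type like this is
 * typically used to batch clause-45 register writes; the helper and its
 * entries below are illustrative only, reusing register names and values
 * that appear elsewhere in this diff.
 */
static void example_apply_reg_set(struct bnx2x *bp, struct bnx2x_phy *phy)
{
	static const struct bnx2x_reg_set init_seq[] = {
		{MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040},
		{MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100},
	};
	u16 i;

	/* Apply each devad/reg/val triplet in order */
	for (i = 0; i < ARRAY_SIZE(init_seq); i++)
		bnx2x_cl45_write(bp, phy, init_seq[i].devad,
				 init_seq[i].reg, init_seq[i].val);
}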
struct bnx2x_phy {
u32 type;
@@ -149,6 +155,7 @@ struct bnx2x_phy {
#define FLAGS_DUMMY_READ (1<<9)
#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
#define FLAGS_TX_ERROR_CHECK (1<<12)
+#define FLAGS_EEE_10GBT (1<<13)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -162,14 +169,15 @@ struct bnx2x_phy {
u32 supported;
u32 media_type;
-#define ETH_PHY_UNSPECIFIED 0x0
-#define ETH_PHY_SFP_FIBER 0x1
-#define ETH_PHY_XFP_FIBER 0x2
-#define ETH_PHY_DA_TWINAX 0x3
-#define ETH_PHY_BASE_T 0x4
-#define ETH_PHY_KR 0xf0
-#define ETH_PHY_CX4 0xf1
-#define ETH_PHY_NOT_PRESENT 0xff
+#define ETH_PHY_UNSPECIFIED 0x0
+#define ETH_PHY_SFPP_10G_FIBER 0x1
+#define ETH_PHY_XFP_FIBER 0x2
+#define ETH_PHY_DA_TWINAX 0x3
+#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_SFP_1G_FIBER 0x5
+#define ETH_PHY_KR 0xf0
+#define ETH_PHY_CX4 0xf1
+#define ETH_PHY_NOT_PRESENT 0xff
/* The address in which version is located*/
u32 ver_addr;
@@ -265,6 +273,30 @@ struct link_params {
u8 num_phys;
u8 rsrv;
+
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28 -
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * microseconds.
+ * 2'b10: bits 1:0 contain an nvram mode selection which will be used
+ * instead of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in microseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
+#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
+#define EEE_MODE_NVRAM_MASK (0x3)
+#define EEE_MODE_TIMER_MASK (0xfffff)
+#define EEE_MODE_OUTPUT_TIME (1<<28)
+#define EEE_MODE_OVERRIDE_NVRAM (1<<29)
+#define EEE_MODE_ENABLE_LPI (1<<30)
+#define EEE_MODE_ADV_LPI (1<<31)
+
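/* Editor's sketch (not part of this patch): composing eee_mode. Setting the
 * three flags below makes bits 31:30 = 2'b11 (EEE advertised and enabled)
 * and bits 29:28 = 2'b01 (Tx LPI timer taken from nvram, reported in
 * microseconds); this mirrors what bnx2x_get_port_hwinfo() does in the
 * bnx2x_main.c hunk later in this diff.
 */
	params->eee_mode = EEE_MODE_ADV_LPI |
			   EEE_MODE_ENABLE_LPI |
			   EEE_MODE_OUTPUT_TIME;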
u16 hw_led_mode; /* part of the hw_config read from the shmem */
u32 multi_phy_config;
@@ -282,6 +314,7 @@ struct link_vars {
#define PHY_PHYSICAL_LINK_FLAG (1<<2)
#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
#define PHY_OVER_CURRENT_FLAG (1<<4)
+#define PHY_SFP_TX_FAULT_FLAG (1<<5)
u8 mac_type;
#define MAC_TYPE_NONE 0
@@ -301,6 +334,7 @@ struct link_vars {
/* The same definitions as the shmem parameter */
u32 link_status;
+ u32 eee_status;
u8 fault_detected;
u8 rsrv1;
u16 periodic_flags;
@@ -459,8 +493,7 @@ struct bnx2x_ets_params {
struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
};
-/**
- * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
* when link is already up
*/
int bnx2x_update_pfc(struct link_params *params,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f755a66..08eca3f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,6 +74,8 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -104,7 +106,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
-static int int_mode;
+int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
@@ -758,7 +760,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Tx */
for_each_cos_in_tx_queue(fp, cos)
{
- txdata = fp->txdata[cos];
+ txdata = *fp->txdata_ptr[cos];
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -876,7 +878,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
@@ -1583,7 +1585,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
- struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+ struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
DP(BNX2X_MSG_SP,
"fp %d cid %d got ramrod #%d state is %x type is %d\n",
@@ -1710,7 +1712,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
/* Handle Rx or Tx according to SB id */
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
@@ -2124,6 +2126,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
}
}
+ if (load_mode == LOAD_LOOPBACK_EXT) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_EXT;
+ }
+
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
@@ -2916,7 +2923,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
u8 cos)
{
- txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
@@ -3030,9 +3037,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
memcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN - 1);
- bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
+ bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local);
ether_stat->mtu_size = bp->dev->mtu;
@@ -3055,7 +3062,8 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_stats_info *fcoe_stat =
&bp->slowpath->drv_info_to_mcp.fcoe_stat;
- memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3063,11 +3071,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
/* insert FCoE stats from ramrod response */
if (!NO_FCOE(bp)) {
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
tstorm_queue_statistics;
struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
xstorm_queue_statistics;
struct fcoe_statistics_params *fw_fcoe_stat =
@@ -3146,7 +3154,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
struct iscsi_stats_info *iscsi_stat =
&bp->slowpath->drv_info_to_mcp.iscsi_stat;
- memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -3176,6 +3185,12 @@ static void bnx2x_set_mf_bw(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
+static void bnx2x_handle_eee_event(struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
+}
+
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
@@ -3742,6 +3757,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_AFEX_EVENT_MASK)
bnx2x_handle_afex_cmd(bp,
val & DRV_STATUS_AFEX_EVENT_MASK);
+ if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
+ bnx2x_handle_eee_event(bp);
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
@@ -4615,11 +4632,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
#ifdef BCM_CNIC
- if (cid == BNX2X_ISCSI_ETH_CID)
+ if (cid == BNX2X_ISCSI_ETH_CID(bp))
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
else
#endif
- vlan_mac_obj = &bp->fp[cid].mac_obj;
+ vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
case BNX2X_FILTER_MCAST_PENDING:
@@ -4717,7 +4734,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
for_each_eth_queue(bp, q) {
/* Set the appropriate Queue object */
fp = &bp->fp[q];
- queue_params.q_obj = &fp->q_obj;
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* send the ramrod */
rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -4728,8 +4745,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp)) {
- fp = &bp->fp[FCOE_IDX];
- queue_params.q_obj = &fp->q_obj;
+ fp = &bp->fp[FCOE_IDX(bp)];
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* clear pending completion bit */
__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
@@ -4761,11 +4778,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
{
DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
#ifdef BCM_CNIC
- if (cid == BNX2X_FCOE_ETH_CID)
- return &bnx2x_fcoe(bp, q_obj);
+ if (cid == BNX2X_FCOE_ETH_CID(bp))
+ return &bnx2x_fcoe_sp_obj(bp, q_obj);
else
#endif
- return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+ return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}
static void bnx2x_eq_int(struct bnx2x *bp)
@@ -5647,15 +5664,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
/* init tx data */
for_each_cos_in_tx_queue(fp, cos) {
- bnx2x_init_txdata(bp, &fp->txdata[cos],
- CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
- FP_COS_TO_TXQ(fp, cos),
- BNX2X_TX_SB_INDEX_BASE + cos);
- cids[cos] = fp->txdata[cos].cid;
+ bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
+ FP_COS_TO_TXQ(fp, cos, bp),
+ BNX2X_TX_SB_INDEX_BASE + cos, fp);
+ cids[cos] = fp->txdata_ptr[cos]->cid;
}
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+ fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
/**
@@ -5706,7 +5723,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
for_each_tx_queue(bp, i)
for_each_cos_in_tx_queue(&bp->fp[i], cos)
- bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+ bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
@@ -7055,12 +7072,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
- ilt->lines[cdu_ilt_start + i].page =
- bp->context.vcxt + (ILT_PAGE_CIDS * i);
+ ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
- bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
- /* cdu ilt pages are allocated manually so there's no need to
- set the size */
+ bp->context[i].cxt_mapping;
+ ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
bnx2x_ilt_init_op(bp, INITOP_SET);
@@ -7327,6 +7342,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
void bnx2x_free_mem(struct bnx2x *bp)
{
+ int i;
+
/* fastpath */
bnx2x_free_fp_mem(bp);
/* end of fastpath */
@@ -7340,9 +7357,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
- bp->context.size);
-
+ for (i = 0; i < L2_ILT_LINES(bp); i++)
+ BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
+ bp->context[i].size);
bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
BNX2X_FREE(bp->ilt->lines);
@@ -7428,6 +7445,8 @@ alloc_mem_err:
int bnx2x_alloc_mem(struct bnx2x *bp)
{
+ int i, allocated, context_size;
+
#ifdef BCM_CNIC
if (!CHIP_IS_E1x(bp))
/* size = the status block + ramrod buffers */
@@ -7457,11 +7476,29 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
if (bnx2x_alloc_fw_stats_mem(bp))
goto alloc_mem_err;
- bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
-
- BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
- bp->context.size);
+ /* Allocate memory for CDU context:
+ * This memory is allocated separately and not in the generic ILT
+ * functions because CDU differs in few aspects:
+ * 1. There are multiple entities allocating memory for context -
+ * 'regular' driver, CNIC and SRIOV driver. Each separately controls
+ * its own ILT lines.
+ * 2. Since CDU page-size is not a single 4KB page (which is the case
+ * for the other ILT clients), to be efficient we want to support
+ * allocation of sub-page-size in the last entry.
+ * 3. Context pointers are used by the driver to pass to FW / update
+ * the context (for the other ILT clients the pointers are used just to
+ * free the memory during unload).
+ */
+ context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+ for (i = 0, allocated = 0; allocated < context_size; i++) {
+ bp->context[i].size = min(CDU_ILT_PAGE_SZ,
+ (context_size - allocated));
+ BNX2X_PCI_ALLOC(bp->context[i].vcxt,
+ &bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ allocated += bp->context[i].size;
+ }
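/* Editor's note (worked example, not part of the patch): if context_size
 * were 2.5 * CDU_ILT_PAGE_SZ, the loop above would allocate two full
 * CDU_ILT_PAGE_SZ chunks plus a final half-page chunk, so only the last
 * ILT line gets a sub-page allocation -- the efficiency case described in
 * point 2 of the comment above.
 */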
BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
@@ -7563,8 +7600,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
/* Eth MAC is set on RSS leading client (fp[0]) */
- return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
- BNX2X_ETH_MAC, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
+ set, BNX2X_ETH_MAC, &ramrod_flags);
}
int bnx2x_setup_leading(struct bnx2x *bp)
@@ -7579,7 +7616,7 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+void bnx2x_set_int_mode(struct bnx2x *bp)
{
switch (int_mode) {
case INT_MODE_MSI:
@@ -7590,11 +7627,6 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* Set number of queues for MSI-X mode */
- bnx2x_set_num_queues(bp);
-
- BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
-
/* if we can't use MSI-X we only need one fp,
* so try to enable MSI-X with the requested number of fp's
* and fallback to MSI or legacy INTx with one fp
@@ -7735,6 +7767,8 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
{
u8 cos;
+ int cxt_index, cxt_offset;
+
/* FCoE Queue uses Default SB, thus has no HC capabilities */
if (!IS_FCOE_FP(fp)) {
__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7771,9 +7805,13 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
fp->index, init_params->max_cos);
/* set the context pointers queue object */
- for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+ cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
+ cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
+ ILT_PAGE_CIDS);
init_params->cxts[cos] =
- &bp->context.vcxt[fp->txdata[cos].cid].eth;
+ &bp->context[cxt_index].vcxt[cxt_offset].eth;
+ }
}
int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -7838,7 +7876,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
IGU_INT_ENABLE, 0);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7911,7 +7949,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7922,7 +7960,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
tx_index++){
/* ascertain this is a normal queue*/
- txdata = &fp->txdata[tx_index];
+ txdata = fp->txdata_ptr[tx_index];
DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
txdata->txq_index);
@@ -8289,7 +8327,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos)
- rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
#ifdef BNX2X_STOP_ON_ERROR
if (rc)
return;
@@ -8300,12 +8338,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
usleep_range(1000, 1000);
/* Clean all ETH MACs */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+ false);
if (rc < 0)
BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
/* Clean up UC list */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
true);
if (rc < 0)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
@@ -9697,6 +9736,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
BC_SUPPORTS_PFC_STATS : 0;
+ bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
+ BC_SUPPORTS_FCOE_FEATURES : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
+ BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -10082,7 +10126,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 config;
- u32 ext_phy_type, ext_phy_config;
+ u32 ext_phy_type, ext_phy_config, eee_mode;
bp->link_params.bp = bp;
bp->link_params.port = port;
@@ -10149,6 +10193,19 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
bp->common.shmem_base,
bp->common.shmem2_base);
+
+ /* Configure link feature according to nvram value */
+ eee_mode = (((SHMEM_RD(bp, dev_info.
+ port_feature_config[port].eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+ if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
+ bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
+ EEE_MODE_ENABLE_LPI |
+ EEE_MODE_OUTPUT_TIME;
+ } else {
+ bp->link_params.eee_mode = 0;
+ }
}
void bnx2x_get_iscsi_info(struct bnx2x *bp)
@@ -10997,7 +11054,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
int rc;
struct net_device *dev = bp->dev;
struct netdev_hw_addr *ha;
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
unsigned long ramrod_flags = 0;
/* First schedule a cleanup up of old configuration */
@@ -11503,8 +11560,7 @@ static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
}
}
-/**
- * IRO array is stored in the following format:
+/* IRO array is stored in the following format:
* {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
*/
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
@@ -11672,7 +11728,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
- int cid_count = BNX2X_L2_CID_COUNT(bp);
+ int cid_count = BNX2X_L2_MAX_CID(bp);
#ifdef BCM_CNIC
cid_count += CNIC_CID_MAX;
@@ -11717,7 +11773,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
struct bnx2x *bp;
int pcie_width, pcie_speed;
int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count;
+ int rx_count, tx_count, rss_count, doorbell_size;
/*
* An estimated maximum supported CoS number according to the chip
* version.
@@ -11760,13 +11816,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
- /* !!! FIXME !!!
- * Do not allow the maximum SB count to grow above 16
- * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
- * We will use the FP_SB_MAX_E1x macro for this matter.
- */
- max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
-
WARN_ON(!max_non_def_sbs);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
@@ -11777,9 +11826,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/*
* Maximum number of netdev Tx queues:
- * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
- tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+ tx_count = rss_count * max_cos_est + FCOE_PRESENT;
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11788,9 +11837,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp = netdev_priv(dev);
- BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
-
bp->igu_sb_cnt = max_non_def_sbs;
bp->msg_enable = debug;
pci_set_drvdata(pdev, dev);
@@ -11803,6 +11849,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
+ BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
+
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
@@ -11811,9 +11860,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
* Map doorbells here as we need the real value of bp->max_cos which
* is initialized in bnx2x_init_bp().
*/
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- min_t(u64, BNX2X_DB_SIZE(bp),
- pci_resource_len(pdev, 2)));
+ doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -11831,8 +11886,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
#endif
+
+ /* Set bp->num_queues for MSI-X mode*/
+ bnx2x_set_num_queues(bp);
+
/* Configure interrupt mode: try to enable MSI-X/MSI if
- * needed, set bp->num_queues appropriately.
+ * needed.
*/
bnx2x_set_int_mode(bp);
@@ -12176,6 +12235,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
struct eth_spe *spe;
+ int cxt_index, cxt_offset;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -12198,10 +12258,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
* ramrod
*/
if (type == ETH_CONNECTION_TYPE) {
- if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
- bnx2x_set_ctx_validation(bp, &bp->context.
- vcxt[BNX2X_ISCSI_ETH_CID].eth,
- BNX2X_ISCSI_ETH_CID);
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
+ cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
+ ILT_PAGE_CIDS;
+ cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
+ (cxt_index * ILT_PAGE_CIDS);
+ bnx2x_set_ctx_validation(bp,
+ &bp->context[cxt_index].
+ vcxt[cxt_offset].eth,
+ BNX2X_ISCSI_ETH_CID(bp));
+ }
}
/*
@@ -12488,21 +12554,45 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
break;
}
case DRV_CTL_ULP_REGISTER_CMD: {
- int ulp_type = ctl->data.ulp_type;
+ int ulp_type = ctl->data.register_data.ulp_type;
if (CHIP_IS_E3(bp)) {
int idx = BP_FW_MB_IDX(bp);
- u32 cap;
+ u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ int path = BP_PATH(bp);
+ int port = BP_PORT(bp);
+ int i;
+ u32 scratch_offset;
+ u32 *host_addr;
- cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ /* first write capability to shmem2 */
if (ulp_type == CNIC_ULP_ISCSI)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
else if (ulp_type == CNIC_ULP_FCOE)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+
+ if ((ulp_type != CNIC_ULP_FCOE) ||
+ (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
+ (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
+ break;
+
+ /* if reached here - should write fcoe capabilities */
+ scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
+ if (!scratch_offset)
+ break;
+ scratch_offset += offsetof(struct glob_ncsi_oem_data,
+ fcoe_features[path][port]);
+ host_addr = (u32 *) &(ctl->data.register_data.
+ fcoe_features);
+ for (i = 0; i < sizeof(struct fcoe_capabilities);
+ i += 4)
+ REG_WR(bp, scratch_offset + i,
+ *(host_addr + i/4));
}
break;
}
+
case DRV_CTL_ULP_UNREGISTER_CMD: {
int ulp_type = ctl->data.ulp_type;
@@ -12554,6 +12644,21 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
cp->num_irq = 2;
}
+void bnx2x_setup_cnic_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+}
+
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
void *data)
{
@@ -12632,10 +12737,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->drv_ctl = bnx2x_drv_ctl;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
- cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_client_id =
bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
- cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
new file mode 100644
index 0000000..ddd5106
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -0,0 +1,168 @@
+/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_MFW_REQ_H
+#define BNX2X_MFW_REQ_H
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
+
+/* FCoE capabilities required from the driver */
+struct fcoe_capabilities {
+ u32 capability1;
+ /* Maximum number of I/Os per connection */
+ #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
+ #define FCOE_IOS_PER_CONNECTION_SHIFT 0
+ /* Maximum number of Logins per port */
+ #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
+ #define FCOE_LOGINS_PER_PORT_SHIFT 16
+
+ u32 capability2;
+ /* Maximum number of exchanges */
+ #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
+ #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
+ /* Maximum NPIV WWN per port */
+ #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
+ #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
+
+ u32 capability3;
+ /* Maximum number of targets supported */
+ #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
+ #define FCOE_TARGETS_SUPPORTED_SHIFT 0
+ /* Maximum number of outstanding commands across all connections */
+ #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
+ #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
+
+ u32 capability4;
+ #define FCOE_CAPABILITY4_STATEFUL 0x00000001
+ #define FCOE_CAPABILITY4_STATELESS 0x00000002
+ #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
+};
+
+struct glob_ncsi_oem_data {
+ u32 driver_version;
+ u32 unused[3];
+ struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
+};
+
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 2
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
+#endif /* BNX2X_MFW_REQ_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bbd3874..ec62a5c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1488,6 +1488,121 @@
* 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
#define MISC_REG_CHIP_TYPE 0xac60
#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
+#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858
+/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled
+ * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk
+ * 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c
+/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI
+ * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0
+/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that
+ * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM
+ * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that
+ * the FW command that all Queues are empty is disabled. When 0 indicates
+ * that the FW command that all Queues are empty is enabled. [2] - FW Early
+ * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early
+ * Exit command is disabled. When 0 indicates that the FW Early Exit command
+ * is enabled. This bit applicable only in the EXIT Events Mask registers.
+ * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication
+ * is disabled. When 0 indicates that the PBF Request indication is enabled.
+ * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF
+ * Request indication is disabled. When 0 indicates that the Tx Other Than
+ * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1
+ * indicates that the RX EEE LPI Status indication is disabled. When 0
+ * indicates that the RX EEE LPI Status indication is enabled. In the EXIT
+ * Events Masks registers; this bit masks the falling edge detect of the LPI
+ * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that
+ * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause
+ * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the
+ * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY
+ * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM
+ * IDLE indication is disabled. When 0 indicates that the QM IDLE indication
+ * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When
+ * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0
+ * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1
+ * Status Mask. When 1 indicates that the L1 Status indication from the PCIE
+ * CORE is disabled. When 0 indicates that the RX EEE LPI Status indication
+ * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this
+ * bit masks the falling edge detect of the L1 status (L1 is on - off). [11]
+ * - P0 E0 EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI
+ * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1
+ * indicates that the P0 EEE LPI REQ indication is disabled. When =0
+ * indicates that the P0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE
+ * LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P0 EEE LPI REQ indication is
+ * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
+ * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1
+ * REQ indication is disabled. When =0 indicates that the L1 indication is
+ * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
+ * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx
+ * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
+ * Falling Edge Detect indication is enabled (Rx EEE LPI is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. [17] - L1
+ * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling
+ * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off).
+ * When =0 indicates that the L1 Status Falling Edge Detect indication from
+ * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in
+ * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880
+/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates
+ * that the Vmain SM end state is disabled. When 0 indicates that the Vmain
+ * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates
+ * that the FW command that all Queues are empty is disabled. When 0
+ * indicates that the FW command that all Queues are empty is enabled. [2] -
+ * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW
+ * Early Exit command is disabled. When 0 indicates that the FW Early Exit
+ * command is enabled. This bit applicable only in the EXIT Events Mask
+ * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request
+ * indication is disabled. When 0 indicates that the PBF Request indication
+ * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other
+ * Than PBF Request indication is disabled. When 0 indicates that the Tx
+ * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status
+ * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled.
+ * When 0 indicates that the RX LPI Status indication is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1
+ * indicates that the Tx Pause indication is disabled. When 0 indicates that
+ * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1
+ * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates
+ * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1
+ * indicates that the QM IDLE indication is disabled. When 0 indicates that
+ * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9]
+ * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for
+ * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for
+ * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1
+ * Status indication from the PCIE CORE is disabled. When 0 indicates that
+ * the RX EEE LPI Status indication from the PCIE CORE is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the L1 status (L1 is on - off). [11] - P0 E0 EEE EEE LPI REQ Mask. When
+ * =1 indicates that the P0 E0 EEE EEE LPI REQ indication is disabled. When
+ * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1
+ * E0 EEE LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication
+ * is disabled. When =0 indicates that the P0 EEE LPI REQ indication is
+ * enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
+ * indication is enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates
+ * that the P0 EEE LPI REQ indication is disabled. When =0 indicates that
+ * the P0 EEE LPI REQ indication is enabled. [15] - L1 REQ Mask. When =1
+ * indicates that the L1 REQ indication is disabled. When =0 indicates that
+ * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask.
+ * When =1 indicates that the RX EEE LPI Status Falling Edge Detect
+ * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that
+ * the RX EEE LPI Status Falling Edge Detect indication is enabled (Rx EEE
+ * LPI is on - off). This bit is applicable only in the EXIT Events Masks
+ * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the
+ * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled
+ * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge
+ * Detect indication from the PCIE CORE is enabled (L1 is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. Clock 25MHz.
+ * Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888
+/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
+ * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8
/* [RW 32] The following driver registers(1...16) represent 16 drivers and
32 clients. Each client can be controlled by one driver only. One in each
bit represent that this driver control the appropriate client (Ex: bit 5
@@ -5372,6 +5487,8 @@
/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
* packets transmitted by the MAC */
#define XMAC_REG_CTRL_SA_LO 0x28
+#define XMAC_REG_EEE_CTRL 0xd8
+#define XMAC_REG_EEE_TIMERS_HI 0xe4
#define XMAC_REG_PAUSE_CTRL 0x68
#define XMAC_REG_PFC_CTRL 0x70
#define XMAC_REG_PFC_CTRL_HI 0x74
@@ -5796,6 +5913,7 @@
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
@@ -6813,6 +6931,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
#define MDIO_AN_REG_MASTER_STATUS 0x0021
+#define MDIO_AN_REG_EEE_ADV 0x003c
+#define MDIO_AN_REG_LP_EEE_ADV 0x003d
/*bcm*/
#define MDIO_AN_REG_LINK_STATUS 0x8304
#define MDIO_AN_REG_CL37_CL73 0x8370
@@ -6866,6 +6986,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV 0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
/* These are mailbox register set used by 84833. */
@@ -6993,11 +7115,13 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
#define MDIO_WC_REG_TX66_CONTROL 0x83b0
#define MDIO_WC_REG_RX66_CONTROL 0x83c0
#define MDIO_WC_REG_RX66_SCW0 0x83c2
@@ -7036,6 +7160,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_AUX_STATUS 0x19
#define MDIO_REG_INTR_STATUS 0x1a
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
@@ -7150,8 +7275,7 @@ Theotherbitsarereservedandshouldbezero*/
#define CDU_REGION_NUMBER_UCM_AG 4
-/**
- * String-to-compress [31:8] = CID (all 24 bits)
+/* String-to-compress [31:8] = CID (all 24 bits)
* String-to-compress [7:4] = Region
* String-to-compress [3:0] = Type
*/
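
The EEE register additions above are consumed later in this patch: MISC_REG_CPMU_LP_SM_ENT_CNT_P0 is a clear-on-read counter, so the statistics code accumulates it instead of sampling it. A minimal sketch of that pattern, using the REG_RD() accessor and CHIP_IS_E3() check that appear in the bnx2x_stats.c hunk further down (the helper name here is illustrative):

/* Sketch only: fold the clear-on-read EEE LPI entry counter into the
 * driver statistics.  eee_tx_lpi is the field added to
 * struct bnx2x_eth_stats later in this patch. */
static void example_update_eee_lpi(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	if (CHIP_IS_E3(bp))	/* the CPMU EEE block exists only on E3 chips */
		estats->eee_tx_lpi += REG_RD(bp, MISC_REG_CPMU_LP_SM_ENT_CNT_P0);
}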
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 6c14b4a..734fd87 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4107,6 +4107,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+
if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
@@ -4115,6 +4119,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
/* Hashing mask */
data->rss_result_mask = p->rss_result_mask;
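
The two new capability bits are driven from struct bnx2x_config_rss_params, the same bitmap the test_bit() calls above inspect. A hedged sketch of how a caller would request UDP 4-tuple hashing alongside the existing TCP hashing (the helper name is illustrative, and rss_flags is assumed to be the unsigned long bitmap used above):

/* Illustrative only: request IPv4/IPv6 UDP hashing in addition to TCP. */
static void example_request_udp_rss(struct bnx2x_config_rss_params *params,
				    bool udp4, bool udp6)
{
	__set_bit(BNX2X_RSS_IPV4, &params->rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params->rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params->rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params->rss_flags);
	if (udp4)
		__set_bit(BNX2X_RSS_IPV4_UDP, &params->rss_flags);
	if (udp6)
		__set_bit(BNX2X_RSS_IPV6_UDP, &params->rss_flags);
}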
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index efd80bd..f83e033 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -167,9 +167,8 @@ typedef int (*exe_q_remove)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
struct bnx2x_exeq_elem *elem);
-/**
- * @return positive is entry was optimized, 0 - if not, negative
- * in case of an error.
+/* Return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
*/
typedef int (*exe_q_optimize)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
@@ -694,8 +693,10 @@ enum {
BNX2X_RSS_IPV4,
BNX2X_RSS_IPV4_TCP,
+ BNX2X_RSS_IPV4_UDP,
BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP,
+ BNX2X_RSS_IPV6_UDP,
};
struct bnx2x_config_rss_params {
@@ -729,6 +730,10 @@ struct bnx2x_rss_config_obj {
/* Last configured indirection table */
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ /* flags for enabling 4-tuple hash on UDP */
+ u8 udp_rss_v4;
+ u8 udp_rss_v6;
+
int (*config_rss)(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
};
@@ -1280,12 +1285,11 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
struct bnx2x_rx_mode_obj *o);
/**
- * Send and RX_MODE ramrod according to the provided parameters.
+ * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
*
- * @param bp
- * @param p Command parameters
+ * @p: Command parameters
*
- * @return 0 - if operation was successfull and there is no pending completions,
+ * Return: 0 - if operation was successful and there are no pending completions,
* positive number - if there are pending completions,
* negative - if there were errors
*/
@@ -1302,7 +1306,11 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Configure multicast MACs list. May configure a new list
+ * bnx2x_config_mcast - Configure multicast MACs list.
+ *
+ * @cmd: command to execute: BNX2X_MCAST_CMD_X
+ *
+ * May configure a new list
* provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
* (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
* configuration, continue to execute the pending commands
@@ -1313,11 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* the current command will be enqueued to the tail of the
* pending commands list.
*
- * @param bp
- * @param p
- * @param command to execute: BNX2X_MCAST_CMD_X
- *
- * @return 0 is operation was sucessfull and there are no pending completions,
+ * Return: 0 if operation was successful and there are no pending completions,
* negative if there were errors, positive if there are pending
* completions.
*/
@@ -1342,21 +1346,17 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Updates RSS configuration according to provided parameters.
- *
- * @param bp
- * @param p
+ * bnx2x_config_rss - Updates RSS configuration according to provided parameters
*
- * @return 0 in case of success
+ * Return: 0 in case of success
*/
int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
/**
- * Return the current ind_table configuration.
+ * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
*
- * @param bp
- * @param ind_table buffer to fill with the current indirection
+ * @ind_table: buffer to fill with the current indirection
* table content. Should be at least
* T_ETH_INDIRECTION_TABLE_SIZE bytes long.
*/
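
The comment churn in this header follows a single rule: doxygen-style blocks are either demoted to plain /* ... */ comments or rewritten in kernel-doc form, which expects a "name - summary" line, "@arg: description" lines, and a "Return:" section. A minimal template of the target format:

/**
 * example_function - one-line summary of what the function does
 * @bp: driver instance
 * @p:  command parameters
 *
 * Optional longer description, placed after the parameter list.
 *
 * Return: 0 on success, negative value on error.
 */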
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 1e2785c..667d890 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -785,6 +785,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_counter++;
+ if (CHIP_IS_E3(bp))
+ estats->eee_tx_lpi += REG_RD(bp,
+ MISC_REG_CPMU_LP_SM_ENT_CNT_P0);
+
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
@@ -855,17 +859,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
struct tstorm_per_queue_stats *tclient =
&bp->fw_stats_data->queue_stats[i].
tstorm_queue_statistics;
- struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+ struct tstorm_per_queue_stats *old_tclient =
+ &bnx2x_fp_stats(bp, fp)->old_tclient;
struct ustorm_per_queue_stats *uclient =
&bp->fw_stats_data->queue_stats[i].
ustorm_queue_statistics;
- struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+ struct ustorm_per_queue_stats *old_uclient =
+ &bnx2x_fp_stats(bp, fp)->old_uclient;
struct xstorm_per_queue_stats *xclient =
&bp->fw_stats_data->queue_stats[i].
xstorm_queue_statistics;
- struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct xstorm_per_queue_stats *old_xclient =
+ &bnx2x_fp_stats(bp, fp)->old_xclient;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
u32 diff;
@@ -1048,8 +1057,11 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
tmp = estats->mac_discard;
- for_each_rx_queue(bp, i)
- tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ for_each_rx_queue(bp, i) {
+ struct tstorm_per_queue_stats *old_tclient =
+ &bp->fp_stats[i].old_tclient;
+ tmp += le32_to_cpu(old_tclient->checksum_discard);
+ }
nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
nstats->tx_dropped = 0;
@@ -1099,9 +1111,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
int i;
for_each_queue(bp, i) {
- struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
struct bnx2x_eth_q_stats_old *qstats_old =
- &bp->fp[i].eth_q_stats_old;
+ &bp->fp_stats[i].eth_q_stats_old;
UPDATE_ESTAT_QSTAT(driver_xoff);
UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
@@ -1309,12 +1321,9 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
bnx2x_stats_comp(bp);
}
-/**
- * This function will prepare the statistics ramrod data the way
+/* This function will prepare the statistics ramrod data the way
* we will only have to increment the statistics counter and
* send the ramrod each time we have to.
- *
- * @param bp
*/
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
@@ -1428,7 +1437,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
query[first_queue_query_index + i];
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
cur_query_entry->address.hi =
cpu_to_le32(U64_HI(cur_data_offset));
@@ -1479,15 +1488,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
/* function stats */
for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
-
- memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
- memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
- memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
if (bp->stats_init) {
- memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
- memset(&fp->eth_q_stats_old, 0,
- sizeof(fp->eth_q_stats_old));
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
}
}
@@ -1529,8 +1542,10 @@ void bnx2x_save_statistics(struct bnx2x *bp)
/* save queue statistics */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
@@ -1569,7 +1584,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct per_queue_stats *fcoe_q_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX];
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
&fcoe_q_stats->tstorm_queue_statistics;
@@ -1586,8 +1601,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
memset(afex_stats, 0, sizeof(struct afex_stats));
for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
ADD_64(afex_stats->rx_unicast_bytes_hi,
qstats->total_unicast_bytes_received_hi,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 93e689fd..24b8e50 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -203,6 +203,8 @@ struct bnx2x_eth_stats {
/* Recovery */
u32 recoverable_error;
u32 unrecoverable_error;
+ /* src: Clear-on-Read register; Will not survive PMF Migration */
+ u32 eee_tx_lpi;
};
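
Every bnx2x_stats.c hunk above makes the same substitution: per-queue statistics move out of struct bnx2x_fastpath into a parallel bp->fp_stats[] array, reached by index or through the bnx2x_fp_stats() accessor. Neither definition appears in this excerpt, so the sketch below is an approximation of the implied layout, not the committed code:

/* Approximate layout implied by the hunks above; the real definitions
 * live in bnx2x.h and may differ in detail. */
struct bnx2x_fp_stats {
	struct tstorm_per_queue_stats	old_tclient;
	struct ustorm_per_queue_stats	old_uclient;
	struct xstorm_per_queue_stats	old_xclient;
	struct bnx2x_eth_q_stats	eth_q_stats;
	struct bnx2x_eth_q_stats_old	eth_q_stats_old;
};

/* bp->fp[i] and bp->fp_stats[i] describe the same queue, so the accessor
 * only translates a fastpath pointer into its stats slot (an index field
 * on the fastpath is assumed here). */
#define bnx2x_fp_stats(bp, fp)	(&((bp)->fp_stats[(fp)->index]))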
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c95e7b5..3b4fc61 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -256,11 +256,16 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
+ struct fcoe_capabilities *fcoe_cap =
+ &info.data.register_data.fcoe_features;
- if (reg)
+ if (reg) {
info.cmd = DRV_CTL_ULP_REGISTER_CMD;
- else
+ if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
+ memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
+ } else {
info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+ }
info.data.ulp_type = ulp_type;
ethdev->drv_ctl(dev->netdev, &info);
@@ -286,6 +291,9 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
u32 i;
+ if (!cp->ctx_tbl)
+ return -EINVAL;
+
for (i = 0; i < cp->max_cid_space; i++) {
if (cp->ctx_tbl[i].cid == cid) {
*l5_cid = i;
@@ -534,7 +542,8 @@ int cnic_unregister_driver(int ulp_type)
}
if (atomic_read(&ulp_ops->ref_count) != 0)
- netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+ pr_warn("%s: Failed waiting for ref count to go to zero\n",
+ __func__);
return 0;
out_unlock:
@@ -611,6 +620,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (ulp_type == CNIC_ULP_ISCSI)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+ else if (ulp_type == CNIC_ULP_FCOE)
+ dev->fcoe_cap = NULL;
synchronize_rcu();
@@ -1053,12 +1064,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo = &udev->cnic_uinfo;
- uinfo->mem[0].addr = dev->netdev->base_addr;
+ uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
uinfo->mem[0].internal_addr = dev->regview;
- uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+ TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1080,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
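
The UIO hunk above stops trusting the legacy net_device base_addr/mem_end fields and describes BAR 0 from the PCI core's point of view instead. The generic pattern looks like the following, shown with an arbitrary pci_dev and the region already mapped elsewhere (as cnic's regview is); it is a sketch, not the driver's exact code:

/* Generic pattern: describe BAR 0 to UIO via the PCI core. */
uinfo->mem[0].addr = pci_resource_start(pdev, 0);	/* bus address of BAR 0 */
uinfo->mem[0].size = pci_resource_len(pdev, 0);		/* full BAR 0 length */
uinfo->mem[0].internal_addr = regview;			/* mapping done elsewhere */
uinfo->mem[0].memtype = UIO_MEM_PHYS;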
@@ -2585,7 +2599,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
return;
}
- cqes[0] = (struct kcqe *) &kcqe;
+ cqes[0] = &kcqe;
cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}
@@ -3213,6 +3227,9 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
u32 l5_cid;
struct cnic_local *cp = dev->cnic_priv;
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ break;
+
if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
@@ -3943,6 +3960,15 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
cnic_cm_upcall(cp, csk, opcode);
break;
+ case L5CM_RAMROD_CMD_ID_CLOSE:
+ if (l4kcqe->status != 0) {
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
+ "status 0x%x\n", l4kcqe->status);
+ opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ /* Fall through */
+ } else {
+ break;
+ }
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
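
The new L5CM_RAMROD_CMD_ID_CLOSE case relies on a deliberate fall-through: a clean close simply breaks out, while a failed close is relabelled as a CLOSE_COMP opcode so the shared reset/close handling below runs. A tiny standalone illustration of that control flow (the opcode values are placeholders, not the real constants):

/* Standalone illustration of the fall-through pattern above. */
enum { EX_CMD_CLOSE = 1, EX_OPC_CLOSE_COMP = 2 };

static int example_handle(int opcode, int status)
{
	switch (opcode) {
	case EX_CMD_CLOSE:
		if (status == 0)
			break;			/* clean close: nothing more to do */
		opcode = EX_OPC_CLOSE_COMP;	/* reuse the shared completion opcode */
		/* fall through */
	case EX_OPC_CLOSE_COMP:
		return -1;			/* shared teardown/error path */
	}
	return 0;
}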
@@ -4246,8 +4272,6 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
int i;
- cp->stop_cm(dev);
-
if (!cp->csk_tbl)
return 0;
@@ -4665,9 +4689,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cp->kcq1.sw_prod_idx = 0;
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &sblk->status_completion_producer_index;
+ &sblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
+ cp->kcq1.status_idx_ptr = &sblk->status_idx;
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
@@ -4693,9 +4717,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &msblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
- cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
+ &msblk->status_completion_producer_index;
+ cp->kcq1.status_idx_ptr = &msblk->status_idx;
+ cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
@@ -4977,8 +5001,14 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
cp->port_mode = CHIP_PORT_MODE_NONE;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
+ u32 val;
+ pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
+ cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
+ ME_REG_ABS_PF_NUM_SHIFT);
+ func = CNIC_FUNC(cp);
+
+ val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
if (!(val & 1))
val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
else
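
The E2+ branch now derives the absolute PF number from the PCI config-space ME register instead of assuming it, which is a plain mask-and-shift. A small standalone illustration follows; the mask and shift values are placeholders, while the driver itself uses ME_REG_ABS_PF_NUM and ME_REG_ABS_PF_NUM_SHIFT from its headers:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout for illustration only. */
#define EX_ABS_PF_NUM		0x000000f0u
#define EX_ABS_PF_NUM_SHIFT	4

int main(void)
{
	uint32_t me = 0x00000053u;	/* pretend config-space read-out */
	uint8_t pf = (me & EX_ABS_PF_NUM) >> EX_ABS_PF_NUM_SHIFT;

	printf("absolute PF number: %u\n", pf);	/* prints 5 */
	return 0;
}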
@@ -5283,6 +5313,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
i++;
}
cnic_shutdown_rings(dev);
+ cp->stop_cm(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -5512,9 +5543,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
rcu_read_unlock();
}
-/**
- * netdev event handler
- */
+/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 289274e..5cb8888 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -12,8 +12,10 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.5.10"
-#define CNIC_MODULE_RELDATE "March 21, 2012"
+#include "bnx2x/bnx2x_mfw_req.h"
+
+#define CNIC_MODULE_VERSION "2.5.12"
+#define CNIC_MODULE_RELDATE "June 29, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -131,6 +133,11 @@ struct drv_ctl_l2_ring {
u32 cid;
};
+struct drv_ctl_register_data {
+ int ulp_type;
+ struct fcoe_capabilities fcoe_features;
+};
+
struct drv_ctl_info {
int cmd;
union {
@@ -138,6 +145,7 @@ struct drv_ctl_info {
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
int ulp_type;
+ struct drv_ctl_register_data register_data;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
@@ -305,6 +313,7 @@ struct cnic_dev {
int max_rdma_conn;
union drv_info_to_mcp *stats_addr;
+ struct fcoe_capabilities *fcoe_cap;
void *cnic_priv;
};
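
Taken together, the cnic_if.h additions give cnic a path for FCoE capabilities: the ULP stores them in cnic_dev->fcoe_cap, and cnic_ulp_ctl() (earlier in this patch) copies them into drv_ctl_info.data.register_data before calling the netdriver's drv_ctl hook. A condensed restatement of that hand-off, with locking and error handling omitted:

/* Condensed from the cnic_ulp_ctl() hunk above; not a drop-in function. */
struct drv_ctl_info info;

info.cmd = DRV_CTL_ULP_REGISTER_CMD;
if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
	memcpy(&info.data.register_data.fcoe_features, dev->fcoe_cap,
	       sizeof(*dev->fcoe_cap));
info.data.ulp_type = ulp_type;	/* aliases register_data.ulp_type in the union */
ethdev->drv_ctl(dev->netdev, &info);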
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edeeb51..6cbab03 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -9908,7 +9908,7 @@ static bool tg3_enable_msix(struct tg3 *tp)
int i, rc;
struct msix_entry msix_ent[tp->irq_max];
- tp->irq_cnt = num_online_cpus();
+ tp->irq_cnt = netif_get_num_default_rss_queues();
if (tp->irq_cnt > 1) {
/* We want as many rx rings enabled as there are cpus.
* In multiqueue MSI-X mode, the first MSI-X vector
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
}
- if (tg3_flag(tp, 5755_PLUS))
+ if (tg3_flag(tp, 5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_flag_set(tp, SHORT_DMA_BUG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
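
The tg3 change replaces num_online_cpus() with netif_get_num_default_rss_queues(), so a many-core host no longer requests more MSI-X vectors than RSS can usefully spread traffic over. A hedged sketch of the sizing pattern (the clamp against the device limit is typical; tg3's own follow-up adjustments are not shown):

/* Sketch: choose an RSS ring count bounded by the kernel default and by
 * the hardware limit (irq_max assumed to be the device maximum). */
static unsigned int example_rss_ring_count(unsigned int irq_max)
{
	unsigned int n = netif_get_num_default_rss_queues();

	return min(n, irq_max);
}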
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 689e5e1..550d252 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -52,13 +52,7 @@ bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
}
/**
- * bfa_cee_attr_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE attributes
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes
*/
static u32
bfa_cee_attr_meminfo(void)
@@ -66,13 +60,7 @@ bfa_cee_attr_meminfo(void)
return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
}
/**
- * bfa_cee_stats_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE stats
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats
*/
static u32
bfa_cee_stats_meminfo(void)
@@ -81,14 +69,10 @@ bfa_cee_stats_meminfo(void)
}
/**
- * bfa_cee_get_attr_isr()
- *
- * @brief CEE ISR for get-attributes responses from f/w
- *
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
+ * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w
*
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -105,14 +89,10 @@ bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
}
/**
- * bfa_cee_get_attr_isr()
- *
- * @brief CEE ISR for get-stats responses from f/w
+ * bfa_cee_get_stats_isr - CEE ISR for get-stats responses from f/w
*
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
- *
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -147,13 +127,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
}
/**
- * bfa_nw_cee_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE module
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module
*/
u32
bfa_nw_cee_meminfo(void)
@@ -162,15 +136,11 @@ bfa_nw_cee_meminfo(void)
}
/**
- * bfa_nw_cee_mem_claim()
- *
- * @brief Initialized CEE DMA Memory
- *
- * @param[in] cee CEE module pointer
- * dma_kva Kernel Virtual Address of CEE DMA Memory
- * dma_pa Physical Address of CEE DMA Memory
+ * bfa_nw_cee_mem_claim - Initializes CEE DMA Memory
*
- * @return void
+ * @cee: CEE module pointer
+ * @dma_kva: Kernel Virtual Address of CEE DMA Memory
+ * @dma_pa: Physical Address of CEE DMA Memory
*/
void
bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
@@ -185,13 +155,11 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
- * bfa_cee_get_attr()
- *
- * @brief Send the request to the f/w to fetch CEE attributes.
+ * bfa_nw_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
*
- * @param[in] Pointer to the CEE module data structure.
+ * @cee: Pointer to the CEE module data structure.
*
- * @return Status
+ * Return: status
*/
enum bfa_status
bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
@@ -220,13 +188,7 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
}
/**
- * bfa_cee_isrs()
- *
- * @brief Handles Mail-box interrupts for CEE module.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return void
+ * bfa_cee_isr - Handles Mail-box interrupts for CEE module.
*/
static void
@@ -253,14 +215,9 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
}
/**
- * bfa_cee_notify()
- *
- * @brief CEE module heart-beat failure handler.
- * @brief CEE module IOC event handler.
- *
- * @param[in] IOC event type
+ * bfa_cee_notify - CEE module heart-beat failure handler.
*
- * @return void
+ * @event: IOC event type
*/
static void
@@ -307,17 +264,13 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event)
}
/**
- * bfa_nw_cee_attach()
- *
- * @brief CEE module-attach API
+ * bfa_nw_cee_attach - CEE module-attach API
*
- * @param[in] cee - Pointer to the CEE module data structure
- * ioc - Pointer to the ioc module data structure
- * dev - Pointer to the device driver module data structure
- * The device driver specific mbox ISR functions have
- * this pointer as one of the parameters.
- *
- * @return void
+ * @cee: Pointer to the CEE module data structure
+ * @ioc: Pointer to the ioc module data structure
+ * @dev: Pointer to the device driver module data structure.
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
*/
void
bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 3da1a94..ad004a4 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -16,23 +16,18 @@
* www.brocade.com
*/
-/**
- * @file bfa_cs.h BFA common services
- */
+/* BFA common services */
#ifndef __BFA_CS_H__
#define __BFA_CS_H__
#include "cna.h"
-/**
- * @ BFA state machine interfaces
- */
+/* BFA state machine interfaces */
typedef void (*bfa_sm_t)(void *sm, int event);
-/**
- * oc - object class eg. bfa_ioc
+/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - object type, eg. enum ioc_event
@@ -45,9 +40,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
#define bfa_sm_get_state(_sm) ((_sm)->sm)
#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
-/**
- * For converting from state machine function to state encoding.
- */
+/* For converting from state machine function to state encoding. */
struct bfa_sm_table {
bfa_sm_t sm; /*!< state machine function */
int state; /*!< state machine encoding */
@@ -55,13 +48,10 @@ struct bfa_sm_table {
};
#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
-/**
- * State machine with entry actions.
- */
+/* State machine with entry actions. */
typedef void (*bfa_fsm_t)(void *fsm, int event);
-/**
- * oc - object class eg. bfa_ioc
+/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - object type, eg. enum ioc_event
@@ -90,9 +80,7 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
return smt[i].state;
}
-/**
- * @ Generic wait counter.
- */
+/* Generic wait counter. */
typedef void (*bfa_wc_resume_t) (void *cbarg);
@@ -116,9 +104,7 @@ bfa_wc_down(struct bfa_wc *wc)
wc->wc_resume(wc->wc_cbarg);
}
-/**
- * Initialize a waiting counter.
- */
+/* Initialize a waiting counter. */
static inline void
bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
{
@@ -128,9 +114,7 @@ bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
bfa_wc_up(wc);
}
-/**
- * Wait for counter to reach zero
- */
+/* Wait for counter to reach zero */
static inline void
bfa_wc_wait(struct bfa_wc *wc)
{
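
The wait counter kept in bfa_cs.h is a small join primitive: initialize it, bump it once per outstanding operation, drop it as each completes, and the resume callback fires when the count returns to zero. A usage sketch under that reading of the API (the callback and function names here are illustrative):

/* Illustrative use of the bfa_wc wait counter from bfa_cs.h. */
static void example_all_done(void *cbarg)
{
	/* runs once the counter drops back to zero */
}

static void example_start_group(struct bfa_wc *wc, void *cbarg)
{
	bfa_wc_init(wc, example_all_done, cbarg);	/* counter starts at 1 */
	bfa_wc_up(wc);					/* outstanding operation #1 */
	bfa_wc_up(wc);					/* outstanding operation #2 */
	/* ... start the operations; each calls bfa_wc_down() on completion ... */
	bfa_wc_wait(wc);	/* drop the initial count; resume fires when all are done */
}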
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 48f8773..e423f82 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -26,13 +26,9 @@
#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
-/**
- * ---------------------- adapter definitions ------------
- */
+/* ---------------------- adapter definitions ------------ */
-/**
- * BFA adapter level attributes.
- */
+/* BFA adapter level attributes. */
enum {
BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
/*
@@ -74,18 +70,14 @@ struct bfa_adapter_attr {
u8 trunk_capable;
};
-/**
- * ---------------------- IOC definitions ------------
- */
+/* ---------------------- IOC definitions ------------ */
enum {
BFA_IOC_DRIVER_LEN = 16,
BFA_IOC_CHIP_REV_LEN = 8,
};
-/**
- * Driver and firmware versions.
- */
+/* Driver and firmware versions. */
struct bfa_ioc_driver_attr {
char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
@@ -95,9 +87,7 @@ struct bfa_ioc_driver_attr {
char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
};
-/**
- * IOC PCI device attributes
- */
+/* IOC PCI device attributes */
struct bfa_ioc_pci_attr {
u16 vendor_id; /*!< PCI vendor ID */
u16 device_id; /*!< PCI device ID */
@@ -108,9 +98,7 @@ struct bfa_ioc_pci_attr {
char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
};
-/**
- * IOC states
- */
+/* IOC states */
enum bfa_ioc_state {
BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
BFA_IOC_RESET = 2, /*!< IOC is in reset state */
@@ -127,9 +115,7 @@ enum bfa_ioc_state {
BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */
};
-/**
- * IOC firmware stats
- */
+/* IOC firmware stats */
struct bfa_fw_ioc_stats {
u32 enable_reqs;
u32 disable_reqs;
@@ -139,9 +125,7 @@ struct bfa_fw_ioc_stats {
u32 unknown_reqs;
};
-/**
- * IOC driver stats
- */
+/* IOC driver stats */
struct bfa_ioc_drv_stats {
u32 ioc_isrs;
u32 ioc_enables;
@@ -157,9 +141,7 @@ struct bfa_ioc_drv_stats {
u32 rsvd;
};
-/**
- * IOC statistics
- */
+/* IOC statistics */
struct bfa_ioc_stats {
struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
@@ -171,9 +153,7 @@ enum bfa_ioc_type {
BFA_IOC_TYPE_LL = 3,
};
-/**
- * IOC attributes returned in queries
- */
+/* IOC attributes returned in queries */
struct bfa_ioc_attr {
enum bfa_ioc_type ioc_type;
enum bfa_ioc_state state; /*!< IOC state */
@@ -187,22 +167,16 @@ struct bfa_ioc_attr {
u8 rsvd[4]; /*!< 64bit align */
};
-/**
- * Adapter capability mask definition
- */
+/* Adapter capability mask definition */
enum {
BFA_CM_HBA = 0x01,
BFA_CM_CNA = 0x02,
BFA_CM_NIC = 0x04,
};
-/**
- * ---------------------- mfg definitions ------------
- */
+/* ---------------------- mfg definitions ------------ */
-/**
- * Checksum size
- */
+/* Checksum size */
#define BFA_MFG_CHKSUM_SIZE 16
#define BFA_MFG_PARTNUM_SIZE 14
@@ -213,8 +187,7 @@ enum {
#pragma pack(1)
-/**
- * @brief BFA adapter manufacturing block definition.
+/* BFA adapter manufacturing block definition.
*
* All numerical fields are in big-endian format.
*/
@@ -256,9 +229,7 @@ struct bfa_mfg_block {
#pragma pack()
-/**
- * ---------------------- pci definitions ------------
- */
+/* ---------------------- pci definitions ------------ */
/*
* PCI device ID information
@@ -275,9 +246,7 @@ enum {
#define bfa_asic_id_ctc(device) \
(bfa_asic_id_ct(device) || bfa_asic_id_ct2(device))
-/**
- * PCI sub-system device and vendor ID information
- */
+/* PCI sub-system device and vendor ID information */
enum {
BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
BFA_PCI_CT2_SSID_FCoE = 0x22,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index 8ab33ee..b39c5f2 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -20,10 +20,7 @@
#include "bfa_defs.h"
-/**
- * @brief
- * FC physical port statistics.
- */
+/* FC physical port statistics. */
struct bfa_port_fc_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 tx_frames; /*!< Tx frames */
@@ -59,10 +56,7 @@ struct bfa_port_fc_stats {
u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */
};
-/**
- * @brief
- * Eth Physical Port statistics.
- */
+/* Eth Physical Port statistics. */
struct bfa_port_eth_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 frame_64; /*!< Frames 64 bytes */
@@ -108,10 +102,7 @@ struct bfa_port_eth_stats {
u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */
};
-/**
- * @brief
- * Port statistics.
- */
+/* Port statistics. */
union bfa_port_stats_u {
struct bfa_port_fc_stats fc;
struct bfa_port_eth_stats eth;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 6681fe8..7fb396f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -20,33 +20,23 @@
#include "bfa_defs.h"
-/**
- * Manufacturing block version
- */
+/* Manufacturing block version */
#define BFA_MFG_VERSION 3
#define BFA_MFG_VERSION_UNINIT 0xFF
-/**
- * Manufacturing block encrypted version
- */
+/* Manufacturing block encrypted version */
#define BFA_MFG_ENC_VER 2
-/**
- * Manufacturing block version 1 length
- */
+/* Manufacturing block version 1 length */
#define BFA_MFG_VER1_LEN 128
-/**
- * Manufacturing block header length
- */
+/* Manufacturing block header length */
#define BFA_MFG_HDR_LEN 4
#define BFA_MFG_SERIALNUM_SIZE 11
#define STRSZ(_n) (((_n) + 4) & ~3)
-/**
- * Manufacturing card type
- */
+/* Manufacturing card type */
enum {
BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
@@ -70,9 +60,7 @@ enum {
#pragma pack(1)
-/**
- * Check if Mezz card
- */
+/* Check if Mezz card */
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
(type) == BFA_MFG_TYPE_WANCHESE || \
@@ -127,9 +115,7 @@ do { \
} \
} while (0)
-/**
- * VPD data length
- */
+/* VPD data length */
#define BFA_MFG_VPD_LEN 512
#define BFA_MFG_VPD_LEN_INVALID 0
@@ -137,9 +123,7 @@ do { \
#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
-/**
- * VPD vendor tag
- */
+/* VPD vendor tag */
enum {
BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
@@ -151,8 +135,7 @@ enum {
BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
};
-/**
- * @brief BFA adapter flash vpd data definition.
+/* BFA adapter flash vpd data definition.
*
* All numerical fields are in big-endian format.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
index 7c5fe6c..ea9af9a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
@@ -18,8 +18,7 @@
#ifndef __BFA_DEFS_STATUS_H__
#define __BFA_DEFS_STATUS_H__
-/**
- * API status return values
+/* API status return values
*
 * NOTE: The error msgs are auto generated from the comments. Only single line
* comments are supported
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0b640fa..959c58e 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -20,13 +20,9 @@
#include "bfi_reg.h"
#include "bfa_defs.h"
-/**
- * IOC local definitions
- */
+/* IOC local definitions */
-/**
- * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
- */
+/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
@@ -96,9 +92,7 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
-/**
- * IOC state machine definitions/declarations
- */
+/* IOC state machine definitions/declarations */
enum ioc_event {
IOC_E_RESET = 1, /*!< IOC reset request */
IOC_E_ENABLE = 2, /*!< IOC enable request */
@@ -148,9 +142,7 @@ static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);
-/**
- * IOCPF state machine events
- */
+/* IOCPF state machine events */
enum iocpf_event {
IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
@@ -166,9 +158,7 @@ enum iocpf_event {
IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
};
-/**
- * IOCPF states
- */
+/* IOCPF states */
enum bfa_iocpf_state {
BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
@@ -215,21 +205,15 @@ static struct bfa_sm_table iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
-/**
- * IOC State Machine
- */
+/* IOC State Machine */
-/**
- * Beginning state. IOC uninit state.
- */
+/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC is in uninit state.
- */
+/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -243,18 +227,14 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * Reset entry actions -- initialize state machine
- */
+/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
-/**
- * IOC is in reset state.
- */
+/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -282,8 +262,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
bfa_iocpf_enable(ioc);
}
-/**
- * Host IOC function is being enabled, awaiting response from firmware.
+/* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
@@ -325,9 +304,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * Semaphore should be acquired for version check.
- */
+/* Semaphore should be acquired for version check. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
@@ -336,9 +313,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
bfa_ioc_send_getattr(ioc);
}
-/**
- * IOC configuration in progress. Timer is active.
- */
+/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -419,9 +394,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
bfa_iocpf_disable(ioc);
}
-/**
- * IOC is being disabled
- */
+/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -449,9 +422,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * IOC disable completion entry.
- */
+/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
@@ -485,9 +456,7 @@ bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}
-/**
- * Hardware initialization retry.
- */
+/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -534,9 +503,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC failure.
- */
+/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -568,9 +535,7 @@ bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC failure.
- */
+/* IOC failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -593,13 +558,9 @@ bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * IOCPF State Machine
- */
+/* IOCPF State Machine */
-/**
- * Reset entry actions -- initialize state machine
- */
+/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
@@ -607,9 +568,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
iocpf->auto_recover = bfa_nw_auto_recover;
}
-/**
- * Beginning state. IOC is in reset state.
- */
+/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -626,9 +585,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Semaphore should be acquired for version check.
- */
+/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
@@ -636,9 +593,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Awaiting h/w semaphore to continue with version check.
- */
+/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -683,9 +638,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Notify enable completion callback
- */
+/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
@@ -698,9 +651,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
msecs_to_jiffies(BFA_IOC_TOV));
}
-/**
- * Awaiting firmware version match.
- */
+/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -727,18 +678,14 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Request for semaphore.
- */
+/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
+/* Awaiting semaphore for h/w initialization. */
- */
+/* Awaiting semaphore for h/w initialzation. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -778,8 +725,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
bfa_ioc_reset(iocpf->ioc, false);
}
-/**
- * Hardware is being initialized. Interrupts are enabled.
+/* Hardware is being initialized. Interrupts are enabled.
* Holding hardware semaphore lock.
*/
static void
@@ -822,8 +768,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
bfa_ioc_send_enable(iocpf->ioc);
}
-/**
- * Host IOC function is being enabled, awaiting response from firmware.
+/* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
@@ -896,9 +841,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
bfa_ioc_send_disable(iocpf->ioc);
}
-/**
- * IOC is being disabled
- */
+/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -935,9 +878,7 @@ bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * IOC hb ack request is being removed.
- */
+/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -963,9 +904,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * IOC disable completion entry.
- */
+/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
@@ -1000,9 +939,7 @@ bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Hardware initialization failed.
- */
+/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1046,9 +983,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}
-/**
- * Hardware initialization failed.
- */
+/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1084,9 +1019,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * IOC is in failed state.
- */
+/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1134,10 +1067,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}
-/**
- * @brief
- * IOC is in failed state.
- */
+/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1151,13 +1081,9 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * BFA IOC private functions
- */
+/* BFA IOC private functions */
-/**
- * Notify common modules registered for notification.
- */
+/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
@@ -1298,10 +1224,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
del_timer(&ioc->sem_timer);
}
-/**
- * @brief
- * Initialize LPU local memory (aka secondary memory / SRAM)
- */
+/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
@@ -1366,9 +1289,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
-/**
- * Get driver and firmware versions.
- */
+/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
@@ -1388,9 +1309,7 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
}
}
-/**
- * Returns TRUE if same.
- */
+/* Returns TRUE if same. */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
@@ -1408,8 +1327,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
return true;
}
-/**
- * Return true if current running version is valid. Firmware signature and
+/* Return true if current running version is valid. Firmware signature and
* execution context (driver/bios) must match.
*/
static bool
@@ -1430,9 +1348,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}
-/**
- * Conditionally flush any pending message from firmware at start.
- */
+/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
@@ -1443,9 +1359,6 @@ bfa_ioc_msgflush(struct bfa_ioc *ioc)
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
-/**
- * @img ioc_init_logic.jpg
- */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
@@ -1603,10 +1516,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
del_timer(&ioc->hb_timer);
}
-/**
- * @brief
- * Initiate a full firmware download.
- */
+/* Initiate a full firmware download. */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_env)
@@ -1672,9 +1582,7 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
bfa_ioc_hwinit(ioc, force);
}
-/**
- * BFA ioc enable reply by firmware
- */
+/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
u8 cap_bm)
@@ -1686,10 +1594,7 @@ bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}
-/**
- * @brief
- * Update BFA configuration from firmware configuration.
- */
+/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
@@ -1702,9 +1607,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
-/**
- * Attach time initialization of mbox logic.
- */
+/* Attach time initialization of mbox logic. */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
@@ -1718,9 +1621,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
}
}
-/**
- * Mbox poll timer -- restarts any pending mailbox requests.
- */
+/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
@@ -1760,9 +1661,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
}
}
-/**
- * Cleanup any pending requests.
- */
+/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
@@ -1774,12 +1673,12 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
}
/**
- * Read data from SMEM to host through PCI memmap
+ * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
*
- * @param[in] ioc memory for IOC
- * @param[in] tbuf app memory to store data from smem
- * @param[in] soff smem offset
- * @param[in] sz size of smem in bytes
+ * @ioc: memory for IOC
+ * @tbuf: app memory to store data from smem
+ * @soff: smem offset
+ * @sz: size of smem in bytes
*/
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
@@ -1826,9 +1725,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
return 0;
}
-/**
- * Retrieve saved firmware trace from a prior IOC failure.
- */
+/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
@@ -1844,9 +1741,7 @@ bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
return status;
}
-/**
- * Save firmware trace if configured.
- */
+/* Save firmware trace if configured. */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
@@ -1861,9 +1756,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
}
}
-/**
- * Retrieve saved firmware trace from a prior IOC failure.
- */
+/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
@@ -1892,9 +1785,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
bfa_nw_ioc_debug_save_ftrc(ioc);
}
-/**
- * IOCPF to IOC interface
- */
+/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
@@ -1928,9 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
-/**
- * IOC public
- */
+/* IOC public */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
@@ -1954,8 +1843,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
return BFA_STATUS_OK;
}
-/**
- * Interface used by diag module to do firmware boot with memory test
+/* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
static void
@@ -1983,9 +1871,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
bfa_ioc_lpu_start(ioc);
}
-/**
- * Enable/disable IOC failure auto recovery.
- */
+/* Enable/disable IOC failure auto recovery. */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
@@ -2056,10 +1942,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
}
/**
- * IOC attach time initialization and setup.
+ * bfa_nw_ioc_attach - IOC attach time initialization and setup.
*
- * @param[in] ioc memory for IOC
- * @param[in] bfa driver instance structure
+ * @ioc: memory for IOC
+ * @bfa: driver instance structure
*/
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
@@ -2078,9 +1964,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
bfa_fsm_send_event(ioc, IOC_E_RESET);
}
-/**
- * Driver detach time IOC cleanup.
- */
+/* Driver detach time IOC cleanup. */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
@@ -2091,9 +1975,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
}
/**
- * Setup IOC PCI properties.
+ * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
*
- * @param[in] pcidev PCI device information for this IOC
+ * @pcidev: PCI device information for this IOC
*/
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
@@ -2160,10 +2044,10 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
}
/**
- * Initialize IOC dma memory
+ * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
*
- * @param[in] dm_kva kernel virtual address of IOC dma memory
- * @param[in] dm_pa physical address of IOC dma memory
+ * @dm_kva: kernel virtual address of IOC dma memory
+ * @dm_pa: physical address of IOC dma memory
*/
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
@@ -2176,9 +2060,7 @@ bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
-/**
- * Return size of dma memory required.
- */
+/* Return size of dma memory required. */
u32
bfa_nw_ioc_meminfo(void)
{
@@ -2201,9 +2083,7 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
-/**
- * Initialize memory for saving firmware trace.
- */
+/* Initialize memory for saving firmware trace. */
void
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
{
@@ -2217,9 +2097,7 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
-/**
- * Register mailbox message handler function, to be called by common modules
- */
+/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
@@ -2231,11 +2109,12 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
}
/**
- * Queue a mailbox command request to firmware. Waits if mailbox is busy.
- * Responsibility of caller to serialize
+ * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
*
- * @param[in] ioc IOC instance
- * @param[i] cmd Mailbox command
+ * @ioc: IOC instance
+ * @cmd: Mailbox command
+ *
+ * Waits if mailbox is busy. Responsibility of caller to serialize
*/
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
@@ -2272,9 +2151,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
return false;
}
-/**
- * Handle mailbox interrupts
- */
+/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
@@ -2314,9 +2191,7 @@ bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
-/**
- * return true if IOC is disabled
- */
+/* return true if IOC is disabled */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
@@ -2324,17 +2199,14 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
-/**
- * return true if IOC is operational
- */
+/* return true if IOC is operational */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
-/**
- * Add to IOC heartbeat failure notification queue. To be used by common
+/* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
void
@@ -2518,9 +2390,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
-/**
- * WWN public
- */
+/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
@@ -2533,9 +2403,7 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
return ioc->attr->mac;
}
-/**
- * Firmware failure detected. Start recovery actions.
- */
+/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
@@ -2545,10 +2413,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
-/**
- * @dg hal_iocpf_pvt BFA IOC PF private functions
- * @{
- */
+/* BFA IOC PF private functions */
static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
@@ -2669,8 +2534,6 @@ bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
/*
* Send flash write request.
- *
- * @param[in] cbarg - callback argument
*/
static void
bfa_flash_write_send(struct bfa_flash *flash)
@@ -2699,10 +2562,10 @@ bfa_flash_write_send(struct bfa_flash *flash)
flash->offset += len;
}
-/*
- * Send flash read request.
+/**
+ * bfa_flash_read_send - Send flash read request.
*
- * @param[in] cbarg - callback argument
+ * @cbarg: callback argument
*/
static void
bfa_flash_read_send(void *cbarg)
@@ -2724,11 +2587,11 @@ bfa_flash_read_send(void *cbarg)
bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}
-/*
- * Process flash response messages upon receiving interrupts.
+/**
+ * bfa_flash_intr - Process flash response messages upon receiving interrupts.
*
- * @param[in] flasharg - flash structure
- * @param[in] msg - message structure
+ * @flasharg: flash structure
+ * @msg: message structure
*/
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
@@ -2821,12 +2684,12 @@ bfa_nw_flash_meminfo(void)
return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
-/*
- * Flash attach API.
+/**
+ * bfa_nw_flash_attach - Flash attach API.
*
- * @param[in] flash - flash structure
- * @param[in] ioc - ioc structure
- * @param[in] dev - device structure
+ * @flash: flash structure
+ * @ioc: ioc structure
+ * @dev: device structure
*/
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
@@ -2842,12 +2705,12 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
-/*
- * Claim memory for flash
+/**
+ * bfa_nw_flash_memclaim - Claim memory for flash
*
- * @param[in] flash - flash structure
- * @param[in] dm_kva - pointer to virtual memory address
- * @param[in] dm_pa - physical memory address
+ * @flash: flash structure
+ * @dm_kva: pointer to virtual memory address
+ * @dm_pa: physical memory address
*/
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
@@ -2859,13 +2722,13 @@ bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
-/*
- * Get flash attribute.
+/**
+ * bfa_nw_flash_get_attr - Get flash attribute.
*
- * @param[in] flash - flash structure
- * @param[in] attr - flash attribute structure
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @attr: flash attribute structure
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
@@ -2895,17 +2758,17 @@ bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
return BFA_STATUS_OK;
}
-/*
- * Update flash partition.
+/**
+ * bfa_nw_flash_update_part - Update flash partition.
*
- * @param[in] flash - flash structure
- * @param[in] type - flash partition type
- * @param[in] instance - flash partition instance
- * @param[in] buf - update data buffer
- * @param[in] len - data buffer length
- * @param[in] offset - offset relative to the partition starting address
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @type: flash partition type
+ * @instance: flash partition instance
+ * @buf: update data buffer
+ * @len: data buffer length
+ * @offset: offset relative to the partition starting address
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
@@ -2944,17 +2807,17 @@ bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
return BFA_STATUS_OK;
}
-/*
- * Read flash partition.
+/**
+ * bfa_nw_flash_read_part - Read flash partition.
*
- * @param[in] flash - flash structure
- * @param[in] type - flash partition type
- * @param[in] instance - flash partition instance
- * @param[in] buf - read data buffer
- * @param[in] len - data buffer length
- * @param[in] offset - offset relative to the partition starting address
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @type: flash partition type
+ * @instance: flash partition instance
+ * @buf: read data buffer
+ * @len: data buffer length
+ * @offset: offset relative to the partition starting address
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
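
The bfa_ioc.c hunks above all follow one pattern: comments that are not kernel-doc drop the extra asterisk, while headers that really are kernel-doc gain the function name in the summary line and switch the old "@param[in]" annotations to "@name:" lines. A minimal sketch of the two resulting styles, with made-up names and assuming only linux/types.h:

#include <linux/types.h>

/**
 * bna_example_sum - add two counters
 * @a: first counter
 * @b: second counter
 *
 * Kernel-doc keeps the function name in the summary line and documents
 * each parameter with an "@name:" line.
 */
static u32 bna_example_sum(u32 a, u32 b)
{
	return a + b;
}

/* A plain comment starts with a single asterisk, so documentation tooling
 * no longer mistakes it for kernel-doc.
 */
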
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 3b4460f..63a85e5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -30,9 +30,7 @@
#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
BFI_IOC_TRC_HDR_SZ)
-/**
- * PCI device information required by IOC
- */
+/* PCI device information required by IOC */
struct bfa_pcidev {
int pci_slot;
u8 pci_func;
@@ -41,8 +39,7 @@ struct bfa_pcidev {
void __iomem *pci_bar_kva;
};
-/**
- * Structure used to remember the DMA-able memory block's KVA and Physical
+/* Structure used to remember the DMA-able memory block's KVA and Physical
* Address
*/
struct bfa_dma {
@@ -52,15 +49,11 @@ struct bfa_dma {
#define BFA_DMA_ALIGN_SZ 256
-/**
- * smem size for Crossbow and Catapult
- */
+/* smem size for Crossbow and Catapult */
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
-/**
- * @brief BFA dma address assignment macro. (big endian format)
- */
+/* BFA dma address assignment macro. (big endian format) */
#define bfa_dma_be_addr_set(dma_addr, pa) \
__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
static inline void
@@ -108,9 +101,7 @@ struct bfa_ioc_regs {
u32 smem_pg0;
};
-/**
- * IOC Mailbox structures
- */
+/* IOC Mailbox structures */
typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
struct bfa_mbox_cmd {
struct list_head qe;
@@ -119,9 +110,7 @@ struct bfa_mbox_cmd {
u32 msg[BFI_IOC_MSGSZ];
};
-/**
- * IOC mailbox module
- */
+/* IOC mailbox module */
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
struct bfa_ioc_mbox_mod {
struct list_head cmd_q; /*!< pending mbox queue */
@@ -132,9 +121,7 @@ struct bfa_ioc_mbox_mod {
} mbhdlr[BFI_MC_MAX];
};
-/**
- * IOC callback function interfaces
- */
+/* IOC callback function interfaces */
typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
@@ -146,9 +133,7 @@ struct bfa_ioc_cbfn {
bfa_ioc_reset_cbfn_t reset_cbfn;
};
-/**
- * IOC event notification mechanism.
- */
+/* IOC event notification mechanism. */
enum bfa_ioc_event {
BFA_IOC_E_ENABLED = 1,
BFA_IOC_E_DISABLED = 2,
@@ -163,9 +148,7 @@ struct bfa_ioc_notify {
void *cbarg;
};
-/**
- * Initialize a IOC event notification structure
- */
+/* Initialize a IOC event notification structure */
#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
(__notify)->cbfn = (__cbfn); \
(__notify)->cbarg = (__cbarg); \
@@ -261,9 +244,7 @@ struct bfa_ioc_hwif {
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
-/**
- * IOC mailbox interface
- */
+/* IOC mailbox interface */
bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
struct bfa_mbox_cmd *cmd,
bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
@@ -271,9 +252,7 @@ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
-/**
- * IOC interfaces
- */
+/* IOC interfaces */
#define bfa_ioc_pll_init_asic(__ioc) \
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index b6b036a..5df0b0c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -87,9 +87,7 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
};
-/**
- * Called from bfa_ioc_attach() to map asic specific calls.
- */
+/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
@@ -102,9 +100,7 @@ bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
ioc->ioc_hwif = &nw_hwif_ct2;
}
-/**
- * Return true if firmware of current driver matches the running firmware.
- */
+/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
@@ -182,9 +178,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
-/**
- * Notify other functions on HB failure.
- */
+/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
@@ -195,9 +189,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
readl(ioc->ioc_regs.alt_ll_halt);
}
-/**
- * Host to LPU mailbox message addresses
- */
+/* Host to LPU mailbox message addresses */
static const struct {
u32 hfn_mbox;
u32 lpu_mbox;
@@ -209,9 +201,7 @@ static const struct {
{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
-/**
- * Host <-> LPU mailbox command/status registers - port 0
- */
+/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
u32 hfn;
u32 lpu;
@@ -222,9 +212,7 @@ static const struct {
{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
-/**
- * Host <-> LPU mailbox command/status registers - port 1
- */
+/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
u32 hfn;
u32 lpu;
@@ -368,9 +356,7 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
-/**
- * Initialize IOC to port mapping.
- */
+/* Initialize IOC to port mapping. */
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
@@ -398,9 +384,7 @@ bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}
-/**
- * Set interrupt mode for a function: INTX or MSIX
- */
+/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
@@ -443,9 +427,7 @@ bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
return false;
}
-/**
- * MSI-X resource allocation for 1860 with no asic block
- */
+/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT 64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
@@ -473,9 +455,7 @@ bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
-/**
- * Cleanup hw semaphore and usecnt registers
- */
+/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
@@ -492,9 +472,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
bfa_nw_ioc_hw_sem_release(ioc);
}
-/**
- * Synchronized IOC failure processing routines
- */
+/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
@@ -518,9 +496,7 @@ bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
return bfa_ioc_ct_sync_complete(ioc);
}
-/**
- * Synchronized IOC failure processing routines
- */
+/* Synchronized IOC failure processing routines */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index dd36427..55067d0 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -16,9 +16,7 @@
* www.brocade.com
*/
-/**
- * @file bfa_msgq.c MSGQ module source file.
- */
+/* MSGQ module source file. */
#include "bfi.h"
#include "bfa_msgq.h"
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 0d9df69..1f24c23 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -22,15 +22,11 @@
#pragma pack(1)
-/**
- * BFI FW image type
- */
+/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
-/**
- * Msg header common to all msgs
- */
+/* Msg header common to all msgs */
struct bfi_mhdr {
u8 msg_class; /*!< @ref enum bfi_mclass */
u8 msg_id; /*!< msg opcode with in the class */
@@ -65,17 +61,14 @@ struct bfi_mhdr {
#define BFI_I2H_OPCODE_BASE 128
#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
-/**
- ****************************************************************************
+/****************************************************************************
*
* Scatter Gather Element and Page definition
*
****************************************************************************
*/
-/**
- * DMA addresses
- */
+/* DMA addresses */
union bfi_addr_u {
struct {
u32 addr_lo;
@@ -83,9 +76,7 @@ union bfi_addr_u {
} a32;
};
-/**
- * Generic DMA addr-len pair.
- */
+/* Generic DMA addr-len pair. */
struct bfi_alen {
union bfi_addr_u al_addr; /* DMA addr of buffer */
u32 al_len; /* length of buffer */
@@ -98,26 +89,20 @@ struct bfi_alen {
#define BFI_LMSG_PL_WSZ \
((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
-/**
- * Mailbox message structure
- */
+/* Mailbox message structure */
#define BFI_MBMSG_SZ 7
struct bfi_mbmsg {
struct bfi_mhdr mh;
u32 pl[BFI_MBMSG_SZ];
};
-/**
- * Supported PCI function class codes (personality)
- */
+/* Supported PCI function class codes (personality) */
enum bfi_pcifn_class {
BFI_PCIFN_CLASS_FC = 0x0c04,
BFI_PCIFN_CLASS_ETH = 0x0200,
};
-/**
- * Message Classes
- */
+/* Message Classes */
enum bfi_mclass {
BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
@@ -159,15 +144,12 @@ enum bfi_mclass {
#define BFI_FWBOOT_ENV_OS 0
-/**
- *----------------------------------------------------------------------
+/*----------------------------------------------------------------------
* IOC
*----------------------------------------------------------------------
*/
-/**
- * Different asic generations
- */
+/* Different asic generations */
enum bfi_asic_gen {
BFI_ASIC_GEN_CB = 1,
BFI_ASIC_GEN_CT = 2,
@@ -196,9 +178,7 @@ enum bfi_ioc_i2h_msgs {
BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
};
-/**
- * BFI_IOC_H2I_GETATTR_REQ message
- */
+/* BFI_IOC_H2I_GETATTR_REQ message */
struct bfi_ioc_getattr_req {
struct bfi_mhdr mh;
union bfi_addr_u attr_addr;
@@ -231,30 +211,22 @@ struct bfi_ioc_attr {
u32 card_type; /*!< card type */
};
-/**
- * BFI_IOC_I2H_GETATTR_REPLY message
- */
+/* BFI_IOC_I2H_GETATTR_REPLY message */
struct bfi_ioc_getattr_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< cfg reply status */
u8 rsvd[3];
};
-/**
- * Firmware memory page offsets
- */
+/* Firmware memory page offsets */
#define BFI_IOC_SMEM_PG0_CB (0x40)
#define BFI_IOC_SMEM_PG0_CT (0x180)
-/**
- * Firmware statistic offset
- */
+/* Firmware statistic offset */
#define BFI_IOC_FWSTATS_OFF (0x6B40)
#define BFI_IOC_FWSTATS_SZ (4096)
-/**
- * Firmware trace offset
- */
+/* Firmware trace offset */
#define BFI_IOC_TRC_OFF (0x4b00)
#define BFI_IOC_TRC_ENTS 256
#define BFI_IOC_TRC_ENT_SZ 16
@@ -299,9 +271,7 @@ struct bfi_ioc_hbeat {
u32 hb_count; /*!< current heart beat count */
};
-/**
- * IOC hardware/firmware state
- */
+/* IOC hardware/firmware state */
enum bfi_ioc_state {
BFI_IOC_UNINIT = 0, /*!< not initialized */
BFI_IOC_INITING = 1, /*!< h/w is being initialized */
@@ -345,9 +315,7 @@ enum {
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
BFI_ADAPTER_UNSUPP))
-/**
- * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
- */
+/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */
struct bfi_ioc_ctrl_req {
struct bfi_mhdr mh;
u16 clscode;
@@ -355,9 +323,7 @@ struct bfi_ioc_ctrl_req {
u32 tv_sec;
};
-/**
- * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
- */
+/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
struct bfi_ioc_ctrl_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< enable/disable status */
@@ -367,9 +333,7 @@ struct bfi_ioc_ctrl_reply {
};
#define BFI_IOC_MSGSZ 8
-/**
- * H2I Messages
- */
+/* H2I Messages */
union bfi_ioc_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_req enable_req;
@@ -378,17 +342,14 @@ union bfi_ioc_h2i_msg_u {
u32 mboxmsg[BFI_IOC_MSGSZ];
};
-/**
- * I2H Messages
- */
+/* I2H Messages */
union bfi_ioc_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_reply fw_event;
u32 mboxmsg[BFI_IOC_MSGSZ];
};
-/**
- *----------------------------------------------------------------------
+/*----------------------------------------------------------------------
* MSGQ
*----------------------------------------------------------------------
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index 4eecabe..6704a43 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -37,18 +37,14 @@ enum bfi_port_i2h {
BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
};
-/**
- * Generic REQ type
- */
+/* Generic REQ type */
struct bfi_port_generic_req {
struct bfi_mhdr mh; /*!< msg header */
u32 msgtag; /*!< msgtag for reply */
u32 rsvd;
};
-/**
- * Generic RSP type
- */
+/* Generic RSP type */
struct bfi_port_generic_rsp {
struct bfi_mhdr mh; /*!< common msg header */
u8 status; /*!< port enable status */
@@ -56,44 +52,12 @@ struct bfi_port_generic_rsp {
u32 msgtag; /*!< msgtag for reply */
};
-/**
- * @todo
- * BFI_PORT_H2I_ENABLE_REQ
- */
-
-/**
- * @todo
- * BFI_PORT_I2H_ENABLE_RSP
- */
-
-/**
- * BFI_PORT_H2I_DISABLE_REQ
- */
-
-/**
- * BFI_PORT_I2H_DISABLE_RSP
- */
-
-/**
- * BFI_PORT_H2I_GET_STATS_REQ
- */
+/* BFI_PORT_H2I_GET_STATS_REQ */
struct bfi_port_get_stats_req {
struct bfi_mhdr mh; /*!< common msg header */
union bfi_addr_u dma_addr;
};
-/**
- * BFI_PORT_I2H_GET_STATS_RSP
- */
-
-/**
- * BFI_PORT_H2I_CLEAR_STATS_REQ
- */
-
-/**
- * BFI_PORT_I2H_CLEAR_STATS_RSP
- */
-
union bfi_port_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_port_generic_req enable_req;
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index a90f1cf..eef6e1f 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -16,12 +16,9 @@
* www.brocade.com
*/
-/**
- * @file bfi_enet.h BNA Hardware and Firmware Interface
- */
+/* BNA Hardware and Firmware Interface */
-/**
- * Skipping statistics collection to avoid clutter.
+/* Skipping statistics collection to avoid clutter.
* Command is no longer needed:
* MTU
* TxQ Stop
@@ -64,9 +61,7 @@ union bfi_addr_be_u {
} a32;
};
-/**
- * T X Q U E U E D E F I N E S
- */
+/* T X Q U E U E D E F I N E S */
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
/* TxQ Entry Opcodes */
#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -106,10 +101,7 @@ struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
union bfi_addr_be_u addr;
};
-/**
- * TxQ Entry Structure
- *
- */
+/* TxQ Entry Structure */
struct bfi_enet_txq_entry {
union {
struct bfi_enet_txq_wi_base base;
@@ -124,16 +116,12 @@ struct bfi_enet_txq_entry {
#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
-/**
- * R X Q U E U E D E F I N E S
- */
+/* R X Q U E U E D E F I N E S */
struct bfi_enet_rxq_entry {
union bfi_addr_be_u rx_buffer;
};
-/**
- * R X C O M P L E T I O N Q U E U E D E F I N E S
- */
+/* R X C O M P L E T I O N Q U E U E D E F I N E S */
/* CQ Entry Flags */
#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
@@ -174,9 +162,7 @@ struct bfi_enet_cq_entry {
u8 rxq_id;
};
-/**
- * E N E T C O N T R O L P A T H C O M M A N D S
- */
+/* E N E T C O N T R O L P A T H C O M M A N D S */
struct bfi_enet_q {
union bfi_addr_u pg_tbl;
union bfi_addr_u first_entry;
@@ -222,9 +208,7 @@ struct bfi_enet_ib {
u16 rsvd;
};
-/**
- * ENET command messages
- */
+/* ENET command messages */
enum bfi_enet_h2i_msgs {
/* Rx Commands */
BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
@@ -350,9 +334,7 @@ enum bfi_enet_i2h_msgs {
BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
};
-/**
- * The following error codes can be returned by the enet commands
- */
+/* The following error codes can be returned by the enet commands */
enum bfi_enet_err {
BFI_ENET_CMD_OK = 0,
BFI_ENET_CMD_FAIL = 1,
@@ -364,8 +346,7 @@ enum bfi_enet_err {
BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
};
-/**
- * Generic Request
+/* Generic Request
*
* bfi_enet_req is used by:
* BFI_ENET_H2I_RX_CFG_CLR_REQ
@@ -375,8 +356,7 @@ struct bfi_enet_req {
struct bfi_msgq_mhdr mh;
};
-/**
- * Enable/Disable Request
+/* Enable/Disable Request
*
* bfi_enet_enable_req is used by:
* BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero)
@@ -391,9 +371,7 @@ struct bfi_enet_enable_req {
u8 rsvd[3];
};
-/**
- * Generic Response
- */
+/* Generic Response */
struct bfi_enet_rsp {
struct bfi_msgq_mhdr mh;
u8 error; /*!< if error see cmd_offset */
@@ -401,20 +379,16 @@ struct bfi_enet_rsp {
u16 cmd_offset; /*!< offset to invalid parameter */
};
-/**
- * GLOBAL CONFIGURATION
- */
+/* GLOBAL CONFIGURATION */
-/**
- * bfi_enet_attr_req is used by:
+/* bfi_enet_attr_req is used by:
* BFI_ENET_H2I_GET_ATTR_REQ
*/
struct bfi_enet_attr_req {
struct bfi_msgq_mhdr mh;
};
-/**
- * bfi_enet_attr_rsp is used by:
+/* bfi_enet_attr_rsp is used by:
* BFI_ENET_I2H_GET_ATTR_RSP
*/
struct bfi_enet_attr_rsp {
@@ -427,8 +401,7 @@ struct bfi_enet_attr_rsp {
u32 rit_size;
};
-/**
- * Tx Configuration
+/* Tx Configuration
*
* bfi_enet_tx_cfg is used by:
* BFI_ENET_H2I_TX_CFG_SET_REQ
@@ -477,8 +450,7 @@ struct bfi_enet_tx_cfg_rsp {
} q_handles[BFI_ENET_TXQ_PRIO_MAX];
};
-/**
- * Rx Configuration
+/* Rx Configuration
*
* bfi_enet_rx_cfg is used by:
* BFI_ENET_H2I_RX_CFG_SET_REQ
@@ -553,8 +525,7 @@ struct bfi_enet_rx_cfg_rsp {
} q_handles[BFI_ENET_RX_QSET_MAX];
};
-/**
- * RIT
+/* RIT
*
* bfi_enet_rit_req is used by:
* BFI_ENET_H2I_RIT_CFG_REQ
@@ -566,8 +537,7 @@ struct bfi_enet_rit_req {
u8 table[BFI_ENET_RSS_RIT_MAX];
};
-/**
- * RSS
+/* RSS
*
* bfi_enet_rss_cfg_req is used by:
* BFI_ENET_H2I_RSS_CFG_REQ
@@ -591,8 +561,7 @@ struct bfi_enet_rss_cfg_req {
struct bfi_enet_rss_cfg cfg;
};
-/**
- * MAC Unicast
+/* MAC Unicast
*
* bfi_enet_rx_vlan_req is used by:
* BFI_ENET_H2I_MAC_UCAST_SET_REQ
@@ -606,17 +575,14 @@ struct bfi_enet_ucast_req {
u8 rsvd[2];
};
-/**
- * MAC Unicast + VLAN
- */
+/* MAC Unicast + VLAN */
struct bfi_enet_mac_n_vlan_req {
struct bfi_msgq_mhdr mh;
u16 vlan_id;
mac_t mac_addr;
};
-/**
- * MAC Multicast
+/* MAC Multicast
*
* bfi_enet_mac_mfilter_add_req is used by:
* BFI_ENET_H2I_MAC_MCAST_ADD_REQ
@@ -627,8 +593,7 @@ struct bfi_enet_mcast_add_req {
u8 rsvd[2];
};
-/**
- * bfi_enet_mac_mfilter_add_rsp is used by:
+/* bfi_enet_mac_mfilter_add_rsp is used by:
* BFI_ENET_I2H_MAC_MCAST_ADD_RSP
*/
struct bfi_enet_mcast_add_rsp {
@@ -640,8 +605,7 @@ struct bfi_enet_mcast_add_rsp {
u8 rsvd1[2];
};
-/**
- * bfi_enet_mac_mfilter_del_req is used by:
+/* bfi_enet_mac_mfilter_del_req is used by:
* BFI_ENET_H2I_MAC_MCAST_DEL_REQ
*/
struct bfi_enet_mcast_del_req {
@@ -650,8 +614,7 @@ struct bfi_enet_mcast_del_req {
u8 rsvd[2];
};
-/**
- * VLAN
+/* VLAN
*
* bfi_enet_rx_vlan_req is used by:
* BFI_ENET_H2I_RX_VLAN_SET_REQ
@@ -663,8 +626,7 @@ struct bfi_enet_rx_vlan_req {
u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
};
-/**
- * PAUSE
+/* PAUSE
*
* bfi_enet_set_pause_req is used by:
* BFI_ENET_H2I_SET_PAUSE_REQ
@@ -676,8 +638,7 @@ struct bfi_enet_set_pause_req {
u8 rx_pause; /* 1 = enable; 0 = disable */
};
-/**
- * DIAGNOSTICS
+/* DIAGNOSTICS
*
* bfi_enet_diag_lb_req is used by:
* BFI_ENET_H2I_DIAG_LOOPBACK
@@ -689,16 +650,13 @@ struct bfi_enet_diag_lb_req {
u8 enable; /* 1 = enable; 0 = disable */
};
-/**
- * enum for Loopback opmodes
- */
+/* enum for Loopback opmodes */
enum {
BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
};
-/**
- * STATISTICS
+/* STATISTICS
*
* bfi_enet_stats_req is used by:
* BFI_ENET_H2I_STATS_GET_REQ
@@ -713,9 +671,7 @@ struct bfi_enet_stats_req {
union bfi_addr_u host_buffer;
};
-/**
- * defines for "stats_mask" above.
- */
+/* defines for "stats_mask" above. */
#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
@@ -881,8 +837,7 @@ struct bfi_enet_stats_mac {
u64 tx_fragments;
};
-/**
- * Complete statistics, DMAed from fw to host followed by
+/* Complete statistics, DMAed from fw to host followed by
* BFI_ENET_I2H_STATS_GET_RSP
*/
struct bfi_enet_stats {
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index 0e094fe..c49fa31 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -221,9 +221,7 @@ enum {
#define __PMM_1T_RESET_P 0x00000001
#define PMM_1T_RESET_REG_P1 0x00023c1c
-/**
- * Brocade 1860 Adapter specific defines
- */
+/* Brocade 1860 Adapter specific defines */
#define CT2_PCI_CPQ_BASE 0x00030000
#define CT2_PCI_APP_BASE 0x00030100
#define CT2_PCI_ETH_BASE 0x00030400
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 4d7a5de..ede532b 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -25,11 +25,7 @@
extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
-/**
- *
- * Macros and constants
- *
- */
+/* Macros and constants */
#define BNA_IOC_TIMER_FREQ 200
@@ -356,11 +352,7 @@ do { \
} \
} while (0)
-/**
- *
- * Inline functions
- *
- */
+/* Inline functions */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
@@ -377,15 +369,9 @@ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
-/**
- *
- * Function prototypes
- *
- */
+/* Function prototypes */
-/**
- * BNA
- */
+/* BNA */
/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
@@ -413,24 +399,19 @@ struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
-/**
- * MBOX
- */
+/* MBOX */
/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);
-/**
- * ETHPORT
- */
+/* ETHPORT */
/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
-/**
- * TX MODULE AND TX
- */
+/* TX MODULE AND TX */
+
/* FW response handlers */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
struct bfi_msgq_mhdr *msghdr);
@@ -462,9 +443,7 @@ void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
-/**
- * RX MODULE, RX, RXF
- */
+/* RX MODULE, RX, RXF */
/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
@@ -522,9 +501,7 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-/**
- * ENET
- */
+/* ENET */
/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);
@@ -544,18 +521,14 @@ void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
-/**
- * IOCETH
- */
+/* IOCETH */
/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
enum bna_cleanup_type type);
-/**
- * BNAD
- */
+/* BNAD */
/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 9ccc586..db14f69 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -378,9 +378,8 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
}
}
-/**
- * ETHPORT
- */
+/* ETHPORT */
+
#define call_ethport_stop_cbfn(_ethport) \
do { \
if ((_ethport)->stop_cbfn) { \
@@ -804,9 +803,8 @@ bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
}
}
-/**
- * ENET
- */
+/* ENET */
+
#define bna_enet_chld_start(enet) \
do { \
enum bna_tx_type tx_type = \
@@ -1328,9 +1326,8 @@ bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
-/**
- * IOCETH
- */
+/* IOCETH */
+
#define enable_mbox_intr(_ioceth) \
do { \
u32 intr_status; \
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 4c6aab2..b8c4e21 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -16,20 +16,15 @@
* www.brocade.com
*/
-/**
- * File for interrupt macros and functions
- */
+/* File for interrupt macros and functions */
#ifndef __BNA_HW_DEFS_H__
#define __BNA_HW_DEFS_H__
#include "bfi_reg.h"
-/**
- *
- * SW imposed limits
- *
- */
+/* SW imposed limits */
+
#define BFI_ENET_DEF_TXQ 1
#define BFI_ENET_DEF_RXP 1
#define BFI_ENET_DEF_UCAM 1
@@ -141,11 +136,8 @@
}
#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id)
-/**
- *
- * Interrupt related bits, flags and macros
- *
- */
+
+/* Interrupt related bits, flags and macros */
#define IB_STATUS_BITS 0x0000ffff
@@ -280,11 +272,7 @@ do { \
(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
(_rcb)->q_dbell));
-/**
- *
- * TxQ, RxQ, CQ related bits, offsets, macros
- *
- */
+/* TxQ, RxQ, CQ related bits, offsets, macros */
/* TxQ Entry Opcodes */
#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -334,11 +322,7 @@ do { \
#define BNA_CQ_EF_LOCAL (1 << 20)
-/**
- *
- * Data structures
- *
- */
+/* Data structures */
struct bna_reg_offset {
u32 fn_int_status;
@@ -371,8 +355,7 @@ struct bna_txq_wi_vector {
struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
};
-/**
- * TxQ Entry Structure
+/* TxQ Entry Structure
*
* BEWARE: Load values into this structure with correct endianess.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 276fcb5..71144b39 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -18,9 +18,7 @@
#include "bna.h"
#include "bfi.h"
-/**
- * IB
- */
+/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
@@ -29,9 +27,7 @@ bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
(u32)ib->coalescing_timeo, 0);
}
-/**
- * RXF
- */
+/* RXF */
#define bna_rxf_vlan_cfg_soft_reset(rxf) \
do { \
@@ -1312,9 +1308,7 @@ bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
return 0;
}
-/**
- * RX
- */
+/* RX */
#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
(qcfg)->num_paths : ((qcfg)->num_paths * 2))
@@ -2791,9 +2785,8 @@ const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
{1, 2},
};
-/**
- * TX
- */
+/* TX */
+
#define call_tx_stop_cbfn(tx) \
do { \
if ((tx)->stop_cbfn) { \
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e8d3ab7..d3eb8bd 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -23,11 +23,7 @@
#include "bfa_cee.h"
#include "bfa_msgq.h"
-/**
- *
- * Forward declarations
- *
- */
+/* Forward declarations */
struct bna_mcam_handle;
struct bna_txq;
@@ -40,11 +36,7 @@ struct bna_enet;
struct bna;
struct bnad;
-/**
- *
- * Enums, primitive data types
- *
- */
+/* Enums, primitive data types */
enum bna_status {
BNA_STATUS_T_DISABLED = 0,
@@ -331,11 +323,7 @@ struct bna_attr {
int max_rit_size;
};
-/**
- *
- * IOCEth
- *
- */
+/* IOCEth */
struct bna_ioceth {
bfa_fsm_t fsm;
@@ -351,11 +339,7 @@ struct bna_ioceth {
struct bna *bna;
};
-/**
- *
- * Enet
- *
- */
+/* Enet */
/* Pause configuration */
struct bna_pause_config {
@@ -390,11 +374,7 @@ struct bna_enet {
struct bna *bna;
};
-/**
- *
- * Ethport
- *
- */
+/* Ethport */
struct bna_ethport {
bfa_fsm_t fsm;
@@ -419,11 +399,7 @@ struct bna_ethport {
struct bna *bna;
};
-/**
- *
- * Interrupt Block
- *
- */
+/* Interrupt Block */
/* Doorbell structure */
struct bna_ib_dbell {
@@ -447,11 +423,7 @@ struct bna_ib {
int interpkt_timeo;
};
-/**
- *
- * Tx object
- *
- */
+/* Tx object */
/* Tx datapath control structure */
#define BNA_Q_NAME_SIZE 16
@@ -585,11 +557,7 @@ struct bna_tx_mod {
struct bna *bna;
};
-/**
- *
- * Rx object
- *
- */
+/* Rx object */
/* Rx datapath control structure */
struct bna_rcb {
@@ -898,11 +866,7 @@ struct bna_rx_mod {
u32 rid_mask;
};
-/**
- *
- * CAM
- *
- */
+/* CAM */
struct bna_ucam_mod {
struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
@@ -927,11 +891,7 @@ struct bna_mcam_mod {
struct bna *bna;
};
-/**
- *
- * Statistics
- *
- */
+/* Statistics */
struct bna_stats {
struct bna_dma_addr hw_stats_dma;
@@ -949,11 +909,7 @@ struct bna_stats_mod {
struct bfi_enet_stats_req stats_clr;
};
-/**
- *
- * BNA
- *
- */
+/* BNA */
struct bna {
struct bna_ident ident;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 67cd2ed..b441f33 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1302,8 +1302,7 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
return 0;
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Unregisters Tx MSIX vector(s) from the kernel
*/
static void
@@ -1322,8 +1321,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
}
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
*/
static int
@@ -1354,8 +1352,7 @@ err_return:
return -1;
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Unregisters Rx MSIX vector(s) from the kernel
*/
static void
@@ -1375,8 +1372,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
}
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
*/
static int
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 72742be..d783392 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -389,9 +389,7 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
void bnad_debugfs_init(struct bnad *bnad);
void bnad_debugfs_uninit(struct bnad *bnad);
-/**
- * MACROS
- */
+/* MACROS */
/* To set & get the stats counters */
#define BNAD_UPDATE_CTR(_bnad, _ctr) \
(((_bnad)->stats.drv_stats._ctr)++)
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index cfc22a6..6a68e8d 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -67,10 +67,10 @@ bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
{
switch (asic_gen) {
case BFI_ASIC_GEN_CT:
- return (u32 *)(bfi_image_ct_cna + off);
+ return (bfi_image_ct_cna + off);
break;
case BFI_ASIC_GEN_CT2:
- return (u32 *)(bfi_image_ct2_cna + off);
+ return (bfi_image_ct2_cna + off);
break;
default:
return NULL;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 1466bc4..033064b 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -179,13 +179,16 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
if (status_change) {
- if (phydev->link)
+ if (phydev->link) {
+ netif_carrier_on(dev);
netdev_info(dev, "link up (%d/%s)\n",
phydev->speed,
phydev->duplex == DUPLEX_FULL ?
"Full" : "Half");
- else
+ } else {
+ netif_carrier_off(dev);
netdev_info(dev, "link down\n");
+ }
}
}
@@ -1033,6 +1036,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
+ /* carrier starts down */
+ netif_carrier_off(dev);
+
/* if the phy is not yet register, retry later*/
if (!bp->phy_dev)
return -EAGAIN;
@@ -1406,6 +1412,8 @@ static int __init macb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
+ netif_carrier_off(dev);
+
netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
dev->irq, dev->dev_addr);
@@ -1469,6 +1477,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
+ netif_carrier_off(netdev);
netif_device_detach(netdev);
clk_disable(bp->hclk);
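
Besides the new log messages, the macb hunks above tie the PHY link state to the net core: netif_carrier_off() is called in open, probe and suspend so the interface starts with carrier down, and the link-change handler flips carrier on or off as the PHY reports. A minimal sketch of that pattern, assuming a made-up handler name:

#include <linux/netdevice.h>

/* Minimal sketch: propagate PHY link state to the stack so queue wake-up
 * and the userspace "carrier" status stay in sync.
 */
static void example_link_change(struct net_device *dev, bool link_up,
				int speed, bool full_duplex)
{
	if (link_up) {
		netif_carrier_on(dev);
		netdev_info(dev, "link up (%d/%s)\n", speed,
			    full_duplex ? "Full" : "Half");
	} else {
		netif_carrier_off(dev);
		netdev_info(dev, "link down\n");
	}
}
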
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 11f667f..2b4b4f5 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -264,7 +264,7 @@
#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
-#define XGMAC_OMR_RTC 0x00000010 /* RX Threshhold Ctrl */
+#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */
#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */
/* XGMAC HW Features Register */
@@ -671,26 +671,23 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry;
- if (priv->rx_skbuff[entry] != NULL)
- continue;
-
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
- if (unlikely(skb == NULL))
- break;
-
- priv->rx_skbuff[entry] = skb;
- paddr = dma_map_single(priv->device, skb->data,
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+ if (priv->rx_skbuff[entry] == NULL) {
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ paddr = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+ }
netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
priv->rx_head, priv->rx_tail);
priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
- /* Ensure descriptor is in memory before handing to h/w */
- wmb();
desc_set_rx_owner(p);
}
}
@@ -933,6 +930,7 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
priv->tx_tail = 0;
priv->tx_head = 0;
+ writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
@@ -972,7 +970,7 @@ static int xgmac_hw_init(struct net_device *dev)
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
/* XGMAC requires AXI bus init. This is a 'magic number' for now */
- writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+ writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
XGMAC_CONTROL_CAR;
@@ -984,7 +982,8 @@ static int xgmac_hw_init(struct net_device *dev)
writel(value, ioaddr + XGMAC_DMA_CONTROL);
/* Set the HW DMA mode and the COE */
- writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+ writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
+ XGMAC_OMR_RTC_256,
ioaddr + XGMAC_OMR);
/* Reset the MMC counters */
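
In xgmac_tx_err() above, the recovery path now rewrites the TX descriptor base register before setting the DMA start bit again, so after the ring indices are reset the engine restarts from descriptor 0 instead of a stale position. A minimal sketch of that ordering, with the ring address and register offsets passed in as parameters (names illustrative only):

#include <linux/io.h>
#include <linux/types.h>

/* Minimal sketch of the restart ordering used above. */
static void example_dma_tx_restart(void __iomem *base, u32 ring_phys,
				   u32 base_reg, u32 ctrl_reg, u32 ctrl_val)
{
	/* point the engine back at descriptor 0 of the reinitialised ring */
	writel(ring_phys, base + base_reg);
	/* only then set the start bit so transmission resumes cleanly */
	writel(ctrl_val, base + ctrl_reg);
}
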
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index abb6ce7..9b08749 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3050,7 +3050,7 @@ static struct pci_error_handlers t3_err_handler = {
static void set_nqsets(struct adapter *adap)
{
int i, j = 0;
- int num_cpus = num_online_cpus();
+ int num_cpus = netif_get_num_default_rss_queues();
int hwports = adap->params.nports;
int nqsets = adap->msix_nvectors - 1;
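
The set_nqsets() hunk above (and the matching cxgb4 change later in this patch) stops sizing queue sets from num_online_cpus() and instead caps them at netif_get_num_default_rss_queues(), which bounds the default RSS fan-out on machines with many CPUs. A minimal sketch of that sizing, with a made-up hardware limit parameter:

#include <linux/netdevice.h>

/* Minimal sketch: cap a driver's RX queue count at the stack's default RSS
 * queue limit rather than the raw online CPU count.
 */
static int example_pick_nqueues(int max_hw_queues)
{
	int n = netif_get_num_default_rss_queues();

	if (n > max_hw_queues)
		n = max_hw_queues;
	return n;
}
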
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 65e4b28..2dbbcbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -62,7 +62,9 @@ static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;
static void cxgb_neigh_update(struct neighbour *neigh);
-static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
+static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
+ struct dst_entry *new, struct neighbour *new_neigh,
+ const void *daddr);
static inline int offload_activated(struct t3cdev *tdev)
{
@@ -575,7 +577,7 @@ static void t3_process_tid_release_list(struct work_struct *work)
if (!skb) {
spin_lock_bh(&td->tid_release_lock);
p->ctx = (void *)td->tid_release_list;
- td->tid_release_list = (struct t3c_tid_entry *)p;
+ td->tid_release_list = p;
break;
}
mk_tid_release(skb, p - td->tid_maps.tid_tab);
@@ -968,8 +970,10 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
}
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
- cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
+ cxgb_redirect(nr->old, nr->old_neigh,
+ nr->new, nr->new_neigh,
+ nr->daddr);
+ cxgb_neigh_update(nr->new_neigh);
break;
}
default:
@@ -1107,10 +1111,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
tdev->send(tdev, skb);
}
-static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
+static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
+ struct dst_entry *new, struct neighbour *new_neigh,
+ const void *daddr)
{
struct net_device *olddev, *newdev;
- struct neighbour *n;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1118,15 +1123,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
struct l2t_entry *e;
struct t3c_tid_entry *te;
- n = dst_get_neighbour_noref(old);
- if (!n)
- return;
- olddev = n->dev;
-
- n = dst_get_neighbour_noref(new);
- if (!n)
- return;
- newdev = n->dev;
+ olddev = old_neigh->dev;
+ newdev = new_neigh->dev;
if (!is_offloading(olddev))
return;
@@ -1144,7 +1142,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, new, newdev);
+ e = t3_l2t_get(tdev, new, newdev, daddr);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__func__);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 3fa3c88..8d53438 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -299,7 +299,7 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
}
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
- struct net_device *dev)
+ struct net_device *dev, const void *daddr)
{
struct l2t_entry *e = NULL;
struct neighbour *neigh;
@@ -311,7 +311,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
int smt_idx;
rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
+ neigh = dst_neigh_lookup(dst, daddr);
if (!neigh)
goto done_rcu;
@@ -360,6 +360,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
done_unlock:
write_unlock_bh(&d->lock);
done_rcu:
+ if (neigh)
+ neigh_release(neigh);
rcu_read_unlock();
return e;
}
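
With the l2t.c change above, t3_l2t_get() resolves the neighbour itself via dst_neigh_lookup() using the destination address handed down from the redirect event; unlike dst_get_neighbour_noref(), that call takes a reference, which is why the exit path now adds neigh_release(). A minimal sketch of the lookup/release pairing, with a made-up helper name:

#include <net/dst.h>
#include <net/neighbour.h>

/* Minimal sketch: dst_neigh_lookup() returns a referenced neighbour (or
 * NULL), so every path out must drop it with neigh_release().
 */
static struct net_device *example_neigh_dev(struct dst_entry *dst,
					    const void *daddr)
{
	struct neighbour *n = dst_neigh_lookup(dst, daddr);
	struct net_device *dev;

	if (!n)
		return NULL;
	dev = n->dev;
	neigh_release(n);
	return dev;
}
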
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c4e8643..8cffcdf 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -110,7 +110,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
- struct net_device *dev);
+ struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index cfb60e1..dd901c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2877,7 +2877,7 @@ static void sge_timer_tx(unsigned long data)
mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}
-/*
+/**
* sge_timer_rx - perform periodic maintenance of an SGE qset
* @data: the SGE queue set to maintain
*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 44ac2f4..bff8a3c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1076,7 +1076,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
return 0;
}
-/*
+/**
* t3_load_fw - download firmware
* @adapter: the adapter
* @fw_data: the firmware image to write
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e1f96fb..5ed49af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3493,8 +3493,8 @@ static void __devinit cfg_queues(struct adapter *adap)
*/
if (n10g)
q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
- if (q10g > num_online_cpus())
- q10g = num_online_cpus();
+ if (q10g > netif_get_num_default_rss_queues())
+ q10g = netif_get_num_default_rss_queues();
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e111d97..8596aca 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -753,7 +753,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
end = (void *)q->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
/**
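
In both write_sgl() implementations touched by this patch the end parameter is a u64 pointer, so the store that pads the scatter-gather list out to a 16-byte boundary needs no cast. A minimal illustration, with a made-up helper name:

#include <linux/types.h>

/* Minimal illustration: when the end-of-list pointer is already typed as
 * u64 *, padding the tail to a 16-byte boundary is a plain store.
 */
static void example_pad_to_16(u64 *end)
{
	if ((uintptr_t)end & 8)		/* list ends on an odd 8-byte boundary */
		*end = 0;		/* no (u64 *) cast needed */
}
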
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32e1dd5..fa947df 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2010,7 +2010,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
-/*
+/**
* t4_mem_win_read_len - read memory through PCIE memory window
* @adap: the adapter
* @addr: address of first byte requested aligned on 32b.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 25e3308..9dad561 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -418,7 +418,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
* restart a TX Ethernet Queue which was stopped for lack of
* free TX Queue Descriptors ...
*/
- const struct cpl_sge_egr_update *p = (void *)cpl;
+ const struct cpl_sge_egr_update *p = cpl;
unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
struct sge *s = &adapter->sge;
struct sge_txq *tq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0bd585b..f2d1ecd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -934,7 +934,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
end = (void *)tq->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
/**
@@ -1323,8 +1323,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely((void *)sgl == (void *)tq->stat)) {
sgl = (void *)tq->desc;
- end = (void *)((void *)tq->desc +
- ((void *)end - (void *)tq->stat));
+ end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
}
write_sgl(skb, tq, sgl, end, 0, addr);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 8132c78..ad1468b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1300,8 +1300,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
}
- skb->dev = netdev;
-
if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index d3cd489..f879e92 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3973,7 +3973,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
tmp = srom_rd(aprom_addr, i);
*p++ = cpu_to_le16(tmp);
}
- de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
+ de4x5_dbg_srom(&lp->srom);
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c5c4c0e..330d59a 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.2.220u"
+#define DRV_VER "4.2.248.0u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -389,6 +389,7 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
+ struct delayed_work func_recovery_work;
u32 flags;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
@@ -396,9 +397,10 @@ struct be_adapter {
u32 *pmac_id; /* MAC addr handle used by BE card */
u32 beacon_state; /* for set_phys_id */
- bool eeh_err;
- bool ue_detected;
+ bool eeh_error;
bool fw_timeout;
+ bool hw_error;
+
u32 port_num;
bool promiscuous;
u32 function_mode;
@@ -435,6 +437,7 @@ struct be_adapter {
u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
u32 uc_macs; /* Count of secondary UC MAC programmed */
u32 msg_enable;
+ int be_get_temp_freq;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -454,6 +457,9 @@ struct be_adapter {
#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
(adapter->pdev->device == OC_DEVICE_ID4))
+#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5)
+
+
#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
(adapter->function_mode & RDMA_ENABLED))
@@ -573,6 +579,11 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
return val;
}
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
@@ -593,7 +604,19 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
static inline bool be_error(struct be_adapter *adapter)
{
- return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+ return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
+}
+
+static inline bool be_crit_error(struct be_adapter *adapter)
+{
+ return adapter->eeh_error || adapter->hw_error;
+}
+
+static inline void be_clear_all_error(struct be_adapter *adapter)
+{
+ adapter->eeh_error = false;
+ adapter->hw_error = false;
+ adapter->fw_timeout = false;
}
static inline bool be_is_wol_excluded(struct be_adapter *adapter)
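
The be.h hunk above folds the old eeh_err/ue_detected pair into eeh_error/hw_error and adds be_error(), be_crit_error() and be_clear_all_error(), giving callers one place to test for and clear sticky error state. A minimal usage sketch, assuming be.h from this patch is included (the function names and the -EIO return are made up for illustration):

#include <linux/errno.h>
#include "be.h"		/* struct be_adapter and the inline helpers above */

/* Minimal usage sketch of the new error helpers. */
static int example_issue_cmd(struct be_adapter *adapter)
{
	if (be_error(adapter))	/* EEH, HW error or FW timeout already seen */
		return -EIO;

	/* ... post the mailbox/MCC command here ... */
	return 0;
}

static void example_after_reset(struct be_adapter *adapter)
{
	/* a successful reset clears all sticky error state in one place */
	be_clear_all_error(adapter);
}
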
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8d06ea3..ddfca65 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,9 +19,6 @@
#include "be.h"
#include "be_cmds.h"
-/* Must be a power of 2 or else MODULO will BUG_ON */
-static int be_get_temp_freq = 64;
-
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
return wrb->payload.embedded_payload;
@@ -115,22 +112,22 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
}
} else {
if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
- be_get_temp_freq = 0;
+ adapter->be_get_temp_freq = 0;
if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
compl_status == MCC_STATUS_ILLEGAL_REQUEST)
goto done;
if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
- dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
- "permitted to execute this cmd (opcode %d)\n",
- opcode);
+ dev_warn(&adapter->pdev->dev,
+ "opcode %d-%d is not permitted\n",
+ opcode, subsystem);
} else {
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
CQE_STATUS_EXTD_MASK;
- dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
- "status %d, extd-status %d\n",
- opcode, compl_status, extd_status);
+ dev_err(&adapter->pdev->dev,
+ "opcode %d-%d failed:status %d-%d\n",
+ opcode, subsystem, compl_status, extd_status);
}
}
done:
@@ -352,7 +349,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
adapter->fw_timeout = true;
- be_detect_dump_ue(adapter);
+ be_detect_error(adapter);
return -1;
}
@@ -429,12 +426,65 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
return 0;
}
-int be_cmd_POST(struct be_adapter *adapter)
+int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 30
+ u32 sliport_status;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+ break;
+
+ msleep(1000);
+ }
+
+ if (i == SLIPORT_READY_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status, err, reset_needed;
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+ if (err && reset_needed) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+
+ /* check adapter has corrected the error */
+ status = lancer_wait_ready(adapter);
+ sliport_status = ioread32(adapter->db +
+ SLIPORT_STATUS_OFFSET);
+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+ SLIPORT_STATUS_RN_MASK);
+ if (status || sliport_status)
+ status = -1;
+ } else if (err || reset_needed) {
+ status = -1;
+ }
+ }
+ return status;
+}
+
+int be_fw_wait_ready(struct be_adapter *adapter)
{
u16 stage;
int status, timeout = 0;
struct device *dev = &adapter->pdev->dev;
+ if (lancer_chip(adapter)) {
+ status = lancer_wait_ready(adapter);
+ return status;
+ }
+
do {
status = be_POST_stage_get(adapter, &stage);
if (status) {
@@ -565,6 +615,9 @@ int be_cmd_fw_init(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (lancer_chip(adapter))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -592,6 +645,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (lancer_chip(adapter))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -610,6 +666,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
mutex_unlock(&adapter->mbox_lock);
return status;
}
+
int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay)
{
@@ -1132,7 +1189,7 @@ err:
* Uses MCCQ
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
+ u32 *if_handle, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
@@ -1152,17 +1209,13 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
- if (mac)
- memcpy(req->mac_addr, mac, ETH_ALEN);
- else
- req->pmac_invalid = true;
+
+ req->pmac_invalid = true;
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
*if_handle = le32_to_cpu(resp->interface_id);
- if (mac)
- *pmac_id = le32_to_cpu(resp->pmac_id);
}
err:
@@ -1210,9 +1263,6 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
- be_cmd_get_die_temperature(adapter);
-
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1692,6 +1742,20 @@ int be_cmd_reset_function(struct be_adapter *adapter)
struct be_cmd_req_hdr *req;
int status;
+ if (lancer_chip(adapter)) {
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+ status = lancer_test_and_set_rdy_state(adapter);
+ }
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter in non-recoverable error\n");
+ }
+ return status;
+ }
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -1728,6 +1792,13 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
req->if_id = cpu_to_le32(adapter->if_handle);
req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
+
+ if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
+ req->hdr.version = 1;
+ req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6);
+ }
+
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
memcpy(req->cpu_table, rsstable, table_size);
memcpy(req->hash, myhash, sizeof(myhash));
@@ -1805,8 +1876,9 @@ err:
}
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_written, u8 *addn_status)
+ u32 data_size, u32 data_offset,
+ const char *obj_name, u32 *data_written,
+ u8 *change_status, u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_write_object *req;
@@ -1862,10 +1934,12 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
status = adapter->flash_status;
resp = embedded_payload(wrb);
- if (!status)
+ if (!status) {
*data_written = le32_to_cpu(resp->actual_write_len);
- else
+ *change_status = resp->change_status;
+ } else {
*addn_status = resp->additional_status;
+ }
return status;
@@ -2330,8 +2404,8 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- bool *pmac_id_active, u32 *pmac_id, u8 *mac)
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2376,8 +2450,9 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
get_mac_list_cmd.va;
mac_count = resp->true_mac_count + resp->pseudo_mac_count;
/* Mac list returned could contain one or more active mac_ids
- * or one or more pseudo permanant mac addresses. If an active
- * mac_id is present, return first active mac_id found
+ * or one or more true or pseudo permanent mac addresses.
+ * If an active mac_id is present, return first active mac_id
+ * found.
*/
for (i = 0; i < mac_count; i++) {
struct get_list_macaddr *mac_entry;
@@ -2396,7 +2471,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
goto out;
}
}
- /* If no active mac_id found, return first pseudo mac addr */
+ /* If no active mac_id found, return first mac addr */
*pmac_id_active = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
ETH_ALEN);
@@ -2648,6 +2723,44 @@ err:
return status;
}
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_port_name *req;
+ int status;
+
+ if (!lancer_chip(adapter)) {
+ *port_name = adapter->hba_port_num + '0';
+ return 0;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
+ NULL);
+ req->hdr.version = 1;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
+ *port_name = resp->port_name[adapter->hba_port_num];
+ } else {
+ *port_name = adapter->hba_port_num + '0';
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 9625bf4..45d70de 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -186,6 +186,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
+#define OPCODE_COMMON_GET_PORT_NAME 77
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -1088,6 +1089,8 @@ struct be_cmd_resp_query_fw_cfg {
#define RSS_ENABLE_TCP_IPV4 0x2
#define RSS_ENABLE_IPV6 0x4
#define RSS_ENABLE_TCP_IPV6 0x8
+#define RSS_ENABLE_UDP_IPV4 0x10
+#define RSS_ENABLE_UDP_IPV6 0x20
struct be_cmd_req_rss_config {
struct be_cmd_req_hdr hdr;
@@ -1163,6 +1166,8 @@ struct lancer_cmd_req_write_object {
u32 addr_high;
};
+#define LANCER_NO_RESET_NEEDED 0x00
+#define LANCER_FW_RESET_NEEDED 0x02
struct lancer_cmd_resp_write_object {
u8 opcode;
u8 subsystem;
@@ -1173,6 +1178,8 @@ struct lancer_cmd_resp_write_object {
u32 resp_len;
u32 actual_resp_len;
u32 actual_write_len;
+ u8 change_status;
+ u8 rsvd3[3];
};
/************************ Lancer Read FW info **************/
@@ -1502,6 +1509,17 @@ struct be_cmd_resp_get_hsw_config {
u32 rsvd;
};
+/******************* get port names ***************/
+struct be_cmd_req_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0;
+};
+
+struct be_cmd_resp_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u8 port_name[4];
+};
+
/*************** HW Stats Get v1 **********************************/
#define BE_TXP_SW_SZ 48
struct be_port_rxf_stats_v1 {
@@ -1566,7 +1584,7 @@ struct be_hw_stats_v1 {
u32 rsvd0[BE_TXP_SW_SZ];
struct be_erx_stats_v1 erx;
struct be_pmem_stats pmem;
- u32 rsvd1[3];
+ u32 rsvd1[18];
};
struct be_cmd_req_get_stats_v1 {
@@ -1656,7 +1674,7 @@ struct be_cmd_req_set_ext_fat_caps {
};
extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_cmd_POST(struct be_adapter *adapter);
+extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle, u32 pmac_id);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
@@ -1664,8 +1682,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
int pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
- u32 domain);
+ u32 en_flags, u32 *if_handle, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
@@ -1719,10 +1736,11 @@ extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
extern int lancer_cmd_write_object(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name,
- u32 *data_written, u8 *addn_status);
+ struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset,
+ const char *obj_name,
+ u32 *data_written, u8 *change_status,
+ u8 *addn_status);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset, const char *obj_name,
u32 *data_read, u32 *eof, u8 *addn_status);
@@ -1745,14 +1763,15 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
extern int be_cmd_get_phy_info(struct be_adapter *adapter);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern void be_detect_error(struct be_adapter *adapter);
extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- bool *pmac_id_active, u32 *pmac_id, u8 *mac);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id,
+ u8 domain);
extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain);
extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
@@ -1765,4 +1784,7 @@ extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd,
struct be_fat_conf_params *cfgs);
+extern int lancer_wait_ready(struct be_adapter *adapter);
+extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 63e51d4..e34be1c 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -648,7 +648,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
struct be_adapter *adapter = netdev_priv(netdev);
int status;
- if (ecmd->autoneg != 0)
+ if (ecmd->autoneg != adapter->phy.fc_autoneg)
return -EINVAL;
adapter->tx_fc = ecmd->tx_pause;
adapter->rx_fc = ecmd->rx_pause;
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index d9fb0c5..b755f70 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -45,20 +45,19 @@
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
-/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
+/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
#define SLIPORT_CONTROL_OFFSET 0x408
#define SLIPORT_ERROR1_OFFSET 0x40C
#define SLIPORT_ERROR2_OFFSET 0x410
+#define PHYSDEV_CONTROL_OFFSET 0x414
#define SLIPORT_STATUS_ERR_MASK 0x80000000
#define SLIPORT_STATUS_RN_MASK 0x01000000
#define SLIPORT_STATUS_RDY_MASK 0x00800000
-
-
#define SLI_PORT_CONTROL_IP_MASK 0x08000000
-
-#define PCICFG_CUST_SCRATCHPAD_CSR 0x1EC
+#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
+#define PHYSDEV_CONTROL_INP_MASK 0x40000000
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
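The new PHYSDEV_CONTROL register and its FW_RESET/INP masks are consumed later in this patch by lancer_wait_idle() and lancer_fw_reset() in be_main.c. A condensed sketch of that protocol, assuming adapter->db is the mapped SLI BAR; the 30 s bound mirrors the driver code further down:

/* Condensed from lancer_wait_idle()/lancer_fw_reset() below; sketch only. */
static int example_phys_fw_reset(struct be_adapter *adapter)
{
	u32 val;
	int i;

	/* Wait for any in-progress operation to complete */
	for (i = 0; i < 30; i++) {
		val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if (!(val & PHYSDEV_CONTROL_INP_MASK))
			break;
		ssleep(1);
	}
	if (i == 30)
		return -1;

	/* Then request the firmware reset */
	iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK,
		  adapter->db + PHYSDEV_CONTROL_OFFSET);
	return 0;
}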
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08efd30..7e989d0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -155,7 +155,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
{
u32 reg, enabled;
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
@@ -201,7 +201,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
DB_EQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
if (arm)
@@ -220,7 +220,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
if (arm)
@@ -558,6 +558,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
wrb->frag_pa_hi = upper_32_bits(addr);
wrb->frag_pa_lo = addr & 0xFFFFFFFF;
wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
+ wrb->rsvd0 = 0;
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
@@ -576,6 +577,11 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}
+static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) || adapter->pvid;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
@@ -703,39 +709,64 @@ dma_err:
return 0;
}
+static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u16 vlan_tag = 0;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return skb;
+
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ __vlan_put_tag(skb, vlan_tag);
+ skb->vlan_tci = 0;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
struct be_queue_info *txq = &txo->q;
+ struct iphdr *ip = NULL;
u32 wrb_cnt = 0, copied = 0;
- u32 start = txq->head;
+ u32 start = txq->head, eth_hdr_len;
bool dummy_wrb, stopped = false;
- /* For vlan tagged pkts, BE
- * 1) calculates checksum even when CSO is not requested
- * 2) calculates checksum wrongly for padded pkt less than
- * 60 bytes long.
- * As a workaround disable TX vlan offloading in such cases.
+ eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
+ VLAN_ETH_HLEN : ETH_HLEN;
+
+ /* HW has a bug which considers padding bytes as legal
+ * and modifies the IPv4 hdr's 'tot_len' field
*/
- if (unlikely(vlan_tx_tag_present(skb) &&
- (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (unlikely(!skb))
- goto tx_drop;
+ if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
+ is_ipv4_pkt(skb)) {
+ ip = (struct iphdr *)ip_hdr(skb);
+ pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ }
- skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+ /* HW has a bug wherein it will calculate CSUM for VLAN
+ * pkts even though it is disabled.
+ * Manually insert VLAN in pkt.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL &&
+ be_vlan_tag_chk(adapter, skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb);
if (unlikely(!skb))
goto tx_drop;
-
- skb->vlan_tci = 0;
}
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
if (copied) {
+ int gso_segs = skb_shinfo(skb)->gso_segs;
+
/* record the sent skb in the sent_skb table */
BUG_ON(txo->sent_skb_list[start]);
txo->sent_skb_list[start] = skb;
@@ -753,8 +784,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
be_txq_notify(adapter, txq->id, wrb_cnt);
- be_tx_stats_update(txo, wrb_cnt, copied,
- skb_shinfo(skb)->gso_segs, stopped);
+ be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
} else {
txq->head = start;
dev_kfree_skb_any(skb);
@@ -785,19 +815,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
* A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
* If the user configures more, place BE in vlan promiscuous mode.
*/
-static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
+static int be_vid_config(struct be_adapter *adapter)
{
- struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
- u16 vtag[BE_NUM_VLANS_SUPPORTED];
- u16 ntags = 0, i;
+ u16 vids[BE_NUM_VLANS_SUPPORTED];
+ u16 num = 0, i;
int status = 0;
- if (vf) {
- vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
- status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
- 1, 1, 0);
- }
-
/* No need to further configure vids if in promiscuous mode */
if (adapter->promiscuous)
return 0;
@@ -808,10 +831,10 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_N_VID; i++)
if (adapter->vlan_tag[i])
- vtag[ntags++] = cpu_to_le16(i);
+ vids[num++] = cpu_to_le16(i);
status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vtag, ntags, 1, 0);
+ vids, num, 1, 0);
/* Set to VLAN promisc mode as setting VLAN filter failed */
if (status) {
@@ -840,7 +863,7 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
- status = be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added++;
@@ -862,7 +885,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
- status = be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
@@ -889,7 +912,7 @@ static void be_set_rx_mode(struct net_device *netdev)
be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
if (adapter->vlans_added)
- be_vid_config(adapter, false, 0);
+ be_vid_config(adapter);
}
/* Enable multicast promisc if num configured exceeds what we support */
@@ -1056,13 +1079,16 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
u16 offset, stride;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
while (dev) {
vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
- if (dev->is_virtfn && dev->devfn == vf_fn) {
+ if (dev->is_virtfn && dev->devfn == vf_fn &&
+ dev->bus->number == pdev->bus->number) {
vfs++;
if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
assigned_vfs++;
@@ -1708,9 +1734,10 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
int i;
for_all_evt_queues(adapter, eqo, i) {
- be_eq_clean(eqo);
- if (eqo->q.created)
+ if (eqo->q.created) {
+ be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ }
be_queue_free(adapter, &eqo->q);
}
}
@@ -1897,6 +1924,12 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
*/
adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
num_irqs(adapter) + 1 : 1;
+ if (adapter->num_rx_qs != MAX_RX_QS) {
+ rtnl_lock();
+ netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_qs);
+ rtnl_unlock();
+ }
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
@@ -2066,13 +2099,13 @@ int be_poll(struct napi_struct *napi, int budget)
return max_work;
}
-void be_detect_dump_ue(struct be_adapter *adapter)
+void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- if (adapter->eeh_err || adapter->ue_detected)
+ if (be_crit_error(adapter))
return;
if (lancer_chip(adapter)) {
@@ -2093,16 +2126,24 @@ void be_detect_dump_ue(struct be_adapter *adapter)
pci_read_config_dword(adapter->pdev,
PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
- ue_lo = (ue_lo & (~ue_lo_mask));
- ue_hi = (ue_hi & (~ue_hi_mask));
+ ue_lo = (ue_lo & ~ue_lo_mask);
+ ue_hi = (ue_hi & ~ue_hi_mask);
}
if (ue_lo || ue_hi ||
sliport_status & SLIPORT_STATUS_ERR_MASK) {
- adapter->ue_detected = true;
- adapter->eeh_err = true;
+ adapter->hw_error = true;
+ dev_err(&adapter->pdev->dev,
+ "Error detected in the card\n");
+ }
+
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
dev_err(&adapter->pdev->dev,
- "Unrecoverable error in the card\n");
+ "ERR: sliport status 0x%x\n", sliport_status);
+ dev_err(&adapter->pdev->dev,
+ "ERR: sliport error1 0x%x\n", sliport_err1);
+ dev_err(&adapter->pdev->dev,
+ "ERR: sliport error2 0x%x\n", sliport_err2);
}
if (ue_lo) {
@@ -2112,6 +2153,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
"UE: %s bit set\n", ue_status_low_desc[i]);
}
}
+
if (ue_hi) {
for (i = 0; ue_hi; ue_hi >>= 1, i++) {
if (ue_hi & 1)
@@ -2120,14 +2162,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
}
}
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "sliport status 0x%x\n", sliport_status);
- dev_err(&adapter->pdev->dev,
- "sliport error1 0x%x\n", sliport_err1);
- dev_err(&adapter->pdev->dev,
- "sliport error2 0x%x\n", sliport_err2);
- }
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -2140,12 +2174,14 @@ static void be_msix_disable(struct be_adapter *adapter)
static uint be_num_rss_want(struct be_adapter *adapter)
{
+ u32 num = 0;
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
!sriov_want(adapter) && be_physfn(adapter) &&
- !be_is_mc(adapter))
- return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
- else
- return 0;
+ !be_is_mc(adapter)) {
+ num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
+ }
+ return num;
}
static void be_msix_enable(struct be_adapter *adapter)
@@ -2539,11 +2575,7 @@ static int be_clear(struct be_adapter *adapter)
be_tx_queues_destroy(adapter);
be_evt_queues_destroy(adapter);
- /* tell fw we're done with firing cmds */
- be_cmd_fw_clean(adapter);
-
be_msix_disable(adapter);
- pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
return 0;
}
@@ -2601,8 +2633,8 @@ static int be_vf_setup(struct be_adapter *adapter)
cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
for_all_vfs(adapter, vf_cfg, vf) {
- status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
- &vf_cfg->if_handle, NULL, vf + 1);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
goto err;
}
@@ -2642,29 +2674,43 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->phy.forced_port_speed = -1;
}
-static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
+static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
+ bool *active_mac, u32 *pmac_id)
{
- u32 pmac_id;
- int status;
- bool pmac_id_active;
+ int status = 0;
- status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
- &pmac_id, mac);
- if (status != 0)
- goto do_none;
+ if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
+ memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
+ if (!lancer_chip(adapter) && !be_physfn(adapter))
+ *active_mac = true;
+ else
+ *active_mac = false;
- if (pmac_id_active) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK,
- false, adapter->if_handle, pmac_id);
+ return status;
+ }
- if (!status)
- adapter->pmac_id[0] = pmac_id;
+ if (lancer_chip(adapter)) {
+ status = be_cmd_get_mac_from_list(adapter, mac,
+ active_mac, pmac_id, 0);
+ if (*active_mac) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, if_handle,
+ *pmac_id);
+ }
+ } else if (be_physfn(adapter)) {
+ /* For BE3, for PF get permanent MAC */
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, true,
+ 0, 0);
+ *active_mac = false;
} else {
- status = be_cmd_pmac_add(adapter, mac,
- adapter->if_handle, &adapter->pmac_id[0], 0);
+ /* For BE3, for VF get soft MAC assigned by PF */
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ if_handle, 0);
+ *active_mac = true;
}
-do_none:
return status;
}
@@ -2685,12 +2731,12 @@ static int be_get_config(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->pdev->dev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
int status;
u8 mac[ETH_ALEN];
+ bool active_mac;
be_setup_init(adapter);
@@ -2716,14 +2762,6 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- memset(mac, 0, ETH_ALEN);
- status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
- true /*permanent */, 0, 0);
- if (status)
- return status;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
-
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2733,27 +2771,29 @@ static int be_setup(struct be_adapter *adapter)
cap_flags |= BE_IF_FLAGS_RSS;
en_flags |= BE_IF_FLAGS_RSS;
}
+
status = be_cmd_if_create(adapter, cap_flags, en_flags,
- netdev->dev_addr, &adapter->if_handle,
- &adapter->pmac_id[0], 0);
+ &adapter->if_handle, 0);
if (status != 0)
goto err;
- /* The VF's permanent mac queried from card is incorrect.
- * For BEx: Query the mac configued by the PF using if_handle
- * For Lancer: Get and use mac_list to obtain mac address.
- */
- if (!be_physfn(adapter)) {
- if (lancer_chip(adapter))
- status = be_add_mac_from_list(adapter, mac);
- else
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false,
- adapter->if_handle, 0);
- if (!status) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
+ memset(mac, 0, ETH_ALEN);
+ active_mac = false;
+ status = be_get_mac_addr(adapter, mac, adapter->if_handle,
+ &active_mac, &adapter->pmac_id[0]);
+ if (status != 0)
+ goto err;
+
+ if (!active_mac) {
+ status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status != 0)
+ goto err;
+ }
+
+ if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
}
status = be_tx_qs_create(adapter);
@@ -2762,7 +2802,8 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
- be_vid_config(adapter, false, 0);
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
be_set_rx_mode(adapter->netdev);
@@ -2772,8 +2813,6 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- pcie_set_readrq(adapter->pdev, 4096);
-
if (be_physfn(adapter) && num_vfs) {
if (adapter->dev_num_vfs)
be_vf_setup(adapter);
@@ -2787,8 +2826,6 @@ static int be_setup(struct be_adapter *adapter)
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
-
- pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
return 0;
err:
be_clear(adapter);
@@ -3032,6 +3069,40 @@ static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
return 0;
}
+static int lancer_wait_idle(struct be_adapter *adapter)
+{
+#define SLIPORT_IDLE_TIMEOUT 30
+ u32 reg_val;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
+ reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
+ if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
+ break;
+
+ ssleep(1);
+ }
+
+ if (i == SLIPORT_IDLE_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+static int lancer_fw_reset(struct be_adapter *adapter)
+{
+ int status = 0;
+
+ status = lancer_wait_idle(adapter);
+ if (status)
+ return status;
+
+ iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
+ PHYSDEV_CONTROL_OFFSET);
+
+ return status;
+}
+
static int lancer_fw_download(struct be_adapter *adapter,
const struct firmware *fw)
{
@@ -3046,6 +3117,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
u32 offset = 0;
int status = 0;
u8 add_status = 0;
+ u8 change_status;
if (!IS_ALIGNED(fw->size, sizeof(u32))) {
dev_err(&adapter->pdev->dev,
@@ -3078,9 +3150,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
memcpy(dest_image_ptr, data_ptr, chunk_size);
status = lancer_cmd_write_object(adapter, &flash_cmd,
- chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
-
+ chunk_size, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
if (status)
break;
@@ -3092,8 +3165,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
if (!status) {
/* Commit the FW written */
status = lancer_cmd_write_object(adapter, &flash_cmd,
- 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
+ 0, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
}
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
@@ -3106,6 +3181,20 @@ static int lancer_fw_download(struct be_adapter *adapter,
goto lancer_fw_exit;
}
+ if (change_status == LANCER_FW_RESET_NEEDED) {
+ status = lancer_fw_reset(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter busy for FW reset.\n"
+ "New FW will not be active.\n");
+ goto lancer_fw_exit;
+ }
+ } else if (change_status != LANCER_NO_RESET_NEEDED) {
+ dev_err(&adapter->pdev->dev,
+ "System reboot required for new FW"
+ " to be active\n");
+ }
+
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
return status;
@@ -3236,7 +3325,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- netif_set_gso_max_size(netdev, 65535);
+ netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
netdev->netdev_ops = &be_netdev_ops;
@@ -3434,10 +3523,15 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_roce_dev_remove(adapter);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
unregister_netdev(adapter->netdev);
be_clear(adapter);
+ /* tell fw we're done with firing cmds */
+ be_cmd_fw_clean(adapter);
+
be_stats_cleanup(adapter);
be_ctrl_cleanup(adapter);
@@ -3529,6 +3623,9 @@ static int be_get_initial_config(struct be_adapter *adapter)
if (be_is_wol_supported(adapter))
adapter->wol = true;
+ /* Must be a power of 2 or else MODULO will BUG_ON */
+ adapter->be_get_temp_freq = 64;
+
level = be_get_fw_log_level(adapter);
adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
@@ -3584,101 +3681,68 @@ static int be_dev_type_check(struct be_adapter *adapter)
return 0;
}
-static int lancer_wait_ready(struct be_adapter *adapter)
+static int lancer_recover_func(struct be_adapter *adapter)
{
-#define SLIPORT_READY_TIMEOUT 30
- u32 sliport_status;
- int status = 0, i;
+ int status;
- for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- if (sliport_status & SLIPORT_STATUS_RDY_MASK)
- break;
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status)
+ goto err;
- msleep(1000);
- }
+ if (netif_running(adapter->netdev))
+ be_close(adapter->netdev);
- if (i == SLIPORT_READY_TIMEOUT)
- status = -1;
+ be_clear(adapter);
- return status;
-}
+ adapter->hw_error = false;
+ adapter->fw_timeout = false;
-static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
-{
- int status;
- u32 sliport_status, err, reset_needed;
- status = lancer_wait_ready(adapter);
- if (!status) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- err = sliport_status & SLIPORT_STATUS_ERR_MASK;
- reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
- if (err && reset_needed) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
-
- /* check adapter has corrected the error */
- status = lancer_wait_ready(adapter);
- sliport_status = ioread32(adapter->db +
- SLIPORT_STATUS_OFFSET);
- sliport_status &= (SLIPORT_STATUS_ERR_MASK |
- SLIPORT_STATUS_RN_MASK);
- if (status || sliport_status)
- status = -1;
- } else if (err || reset_needed) {
- status = -1;
- }
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(adapter->netdev)) {
+ status = be_open(adapter->netdev);
+ if (status)
+ goto err;
}
+
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery succeeded\n");
+ return 0;
+err:
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery failed\n");
+
return status;
}
-static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+static void be_func_recovery_task(struct work_struct *work)
{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, func_recovery_work.work);
int status;
- u32 sliport_status;
-
- if (adapter->eeh_err || adapter->ue_detected)
- return;
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ be_detect_error(adapter);
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "Adapter in error state."
- "Trying to recover.\n");
+ if (adapter->hw_error && lancer_chip(adapter)) {
- status = lancer_test_and_set_rdy_state(adapter);
- if (status)
- goto err;
+ if (adapter->eeh_error)
+ goto out;
+ rtnl_lock();
netif_device_detach(adapter->netdev);
+ rtnl_unlock();
- if (netif_running(adapter->netdev))
- be_close(adapter->netdev);
-
- be_clear(adapter);
-
- adapter->fw_timeout = false;
-
- status = be_setup(adapter);
- if (status)
- goto err;
-
- if (netif_running(adapter->netdev)) {
- status = be_open(adapter->netdev);
- if (status)
- goto err;
- }
-
- netif_device_attach(adapter->netdev);
+ status = lancer_recover_func(adapter);
- dev_err(&adapter->pdev->dev,
- "Adapter error recovery succeeded\n");
+ if (!status)
+ netif_device_attach(adapter->netdev);
}
- return;
-err:
- dev_err(&adapter->pdev->dev,
- "Adapter error recovery failed\n");
+
+out:
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
}
static void be_worker(struct work_struct *work)
@@ -3689,11 +3753,6 @@ static void be_worker(struct work_struct *work)
struct be_eq_obj *eqo;
int i;
- if (lancer_chip(adapter))
- lancer_test_and_recover_fn_err(adapter);
-
- be_detect_dump_ue(adapter);
-
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
@@ -3709,6 +3768,9 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
+ if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
for_all_rx_queues(adapter, rxo, i) {
if (rxo->rx_post_starved) {
rxo->rx_post_starved = false;
@@ -3726,10 +3788,7 @@ reschedule:
static bool be_reset_required(struct be_adapter *adapter)
{
- u32 reg;
-
- pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
- return reg;
+ return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
}
static int __devinit be_probe(struct pci_dev *pdev,
@@ -3738,6 +3797,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
int status = 0;
struct be_adapter *adapter;
struct net_device *netdev;
+ char port_name;
status = pci_enable_device(pdev);
if (status)
@@ -3748,7 +3808,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto disable_dev;
pci_set_master(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
+ netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
if (netdev == NULL) {
status = -ENOMEM;
goto rel_reg;
@@ -3779,22 +3839,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto free_netdev;
- if (lancer_chip(adapter)) {
- status = lancer_wait_ready(adapter);
- if (!status) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
- status = lancer_test_and_set_rdy_state(adapter);
- }
- if (status) {
- dev_err(&pdev->dev, "Adapter in non recoverable error\n");
- goto ctrl_clean;
- }
- }
-
/* sync up with fw's ready state */
if (be_physfn(adapter)) {
- status = be_cmd_POST(adapter);
+ status = be_fw_wait_ready(adapter);
if (status)
goto ctrl_clean;
}
@@ -3825,6 +3872,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto stats_clean;
INIT_DELAYED_WORK(&adapter->work, be_worker);
+ INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
adapter->rx_fc = adapter->tx_fc = true;
status = be_setup(adapter);
@@ -3838,8 +3886,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_roce_dev_add(adapter);
- dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
- adapter->port_num);
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
+
+ be_cmd_query_port_name(adapter, &port_name);
+
+ dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
+ port_name);
return 0;
@@ -3871,6 +3924,8 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
if (adapter->wol)
be_setup_wol(adapter, true);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
netif_device_detach(netdev);
if (netif_running(netdev)) {
rtnl_lock();
@@ -3911,6 +3966,9 @@ static int be_resume(struct pci_dev *pdev)
be_open(netdev);
rtnl_unlock();
}
+
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
netif_device_attach(netdev);
if (adapter->wol)
@@ -3930,6 +3988,7 @@ static void be_shutdown(struct pci_dev *pdev)
return;
cancel_delayed_work_sync(&adapter->work);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
netif_device_detach(adapter->netdev);
@@ -3949,9 +4008,13 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
dev_err(&adapter->pdev->dev, "EEH error detected\n");
- adapter->eeh_err = true;
+ adapter->eeh_error = true;
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
+ rtnl_lock();
netif_device_detach(netdev);
+ rtnl_unlock();
if (netif_running(netdev)) {
rtnl_lock();
@@ -3979,9 +4042,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
int status;
dev_info(&adapter->pdev->dev, "EEH reset\n");
- adapter->eeh_err = false;
- adapter->ue_detected = false;
- adapter->fw_timeout = false;
+ be_clear_all_error(adapter);
status = pci_enable_device(pdev);
if (status)
@@ -3992,7 +4053,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
- status = be_cmd_POST(adapter);
+ status = be_fw_wait_ready(adapter);
if (status)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4014,6 +4075,10 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto err;
+
status = be_setup(adapter);
if (status)
goto err;
@@ -4023,6 +4088,9 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
}
+
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
netif_device_attach(netdev);
return;
err:
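Several hunks above introduce adapter->func_recovery_work: initialised in be_probe(), cancelled in the remove/suspend/shutdown/EEH paths, and re-armed once a second from be_func_recovery_task(). A generic sketch of that self-rearming delayed-work pattern; all names here are placeholders, not the driver's:

/* Sketch only -- placeholder names, same shape as func_recovery_work. */
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_ctx {
	struct delayed_work recovery_work;
};

static void example_recovery_task(struct work_struct *work)
{
	struct example_ctx *ctx =
		container_of(work, struct example_ctx, recovery_work.work);

	/* ... detect and, if needed, recover the function here ... */

	/* Re-arm: the work keeps running once a second until it is
	 * cancelled with cancel_delayed_work_sync() on remove/suspend.
	 */
	schedule_delayed_work(&ctx->recovery_work, msecs_to_jiffies(1000));
}

static void example_start(struct example_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->recovery_work, example_recovery_task);
	schedule_delayed_work(&ctx->recovery_work, msecs_to_jiffies(1000));
}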
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a381678..2029788 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -902,7 +902,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
};
/**
- * ethoc_probe() - initialize OpenCores ethernet MAC
+ * ethoc_probe - initialize OpenCores ethernet MAC
* pdev: platform device
*/
static int __devinit ethoc_probe(struct platform_device *pdev)
@@ -1140,7 +1140,7 @@ out:
}
/**
- * ethoc_remove() - shutdown OpenCores ethernet MAC
+ * ethoc_remove - shutdown OpenCores ethernet MAC
* @pdev: platform device
*/
static int __devexit ethoc_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index ff7f4c5..fffd205 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -49,6 +49,7 @@
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
#include <asm/cacheflush.h>
@@ -1388,8 +1389,8 @@ fec_set_mac_address(struct net_device *ndev, void *p)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * fec_poll_controller: FEC Poll controller function
+/**
+ * fec_poll_controller - FEC Poll controller function
* @dev: The FEC network adapter
*
* Polled functionality used by netconsole and others in non interrupt mode
@@ -1506,18 +1507,25 @@ static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
static void __devinit fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
+ int msec = 1;
struct device_node *np = pdev->dev.of_node;
if (!np)
return;
+ of_property_read_u32(np, "phy-reset-duration", &msec);
+ /* A sane reset duration should not be longer than 1s */
+ if (msec > 1000)
+ msec = 1;
+
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
+ err = devm_gpio_request_one(&pdev->dev, phy_reset,
+ GPIOF_OUT_INIT_LOW, "phy-reset");
if (err) {
pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
return;
}
- msleep(1);
+ msleep(msec);
gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
@@ -1546,6 +1554,7 @@ fec_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
static int dev_id;
struct pinctrl *pinctrl;
+ struct regulator *reg_phy;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1593,8 +1602,6 @@ fec_probe(struct platform_device *pdev)
fep->phy_interface = ret;
}
- fec_reset_phy(pdev);
-
for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
@@ -1634,6 +1641,18 @@ fec_probe(struct platform_device *pdev)
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
+ reg_phy = devm_regulator_get(&pdev->dev, "phy");
+ if (!IS_ERR(reg_phy)) {
+ ret = regulator_enable(reg_phy);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable phy regulator: %d\n", ret);
+ goto failed_regulator;
+ }
+ }
+
+ fec_reset_phy(pdev);
+
ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1655,6 +1674,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
+failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
failed_pin:
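The fec.c hunk reads an optional "phy-reset-duration" property into a pre-initialised msec. This works because of_property_read_u32() leaves the output untouched when the property is absent. A small sketch of that idiom, assuming <linux/of.h>; the function and parameter names are hypothetical:

/* Sketch of the optional-DT-property idiom used above; names are hypothetical. */
static u32 example_optional_u32(struct device_node *np, const char *prop,
				u32 def, u32 max)
{
	u32 val = def;

	/* No-op (val keeps its default) when the property is missing */
	of_property_read_u32(np, prop, &val);
	if (val > max)
		val = def;	/* reject implausible values, as the patch does for >1s */
	return val;
}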
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index f7f0bf5..9527b28 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -47,6 +47,9 @@
#include "gianfar.h"
#include "fsl_pq_mdio.h"
+/* Number of microseconds to wait for an MII register to respond */
+#define MII_TIMEOUT 1000
+
struct fsl_pq_mdio_priv {
void __iomem *map;
struct fsl_pq_mdio __iomem *regs;
@@ -64,6 +67,8 @@ struct fsl_pq_mdio_priv {
int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
int regnum, u16 value)
{
+ u32 status;
+
/* Set the PHY address and the register address we want to write */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -71,10 +76,10 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
out_be32(&regs->miimcon, value);
/* Wait for the transaction to finish */
- while (in_be32(&regs->miimind) & MIIMIND_BUSY)
- cpu_relax();
+ status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
+ MII_TIMEOUT, 0);
- return 0;
+ return status ? 0 : -ETIMEDOUT;
}
/*
@@ -91,6 +96,7 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
int mii_id, int regnum)
{
u16 value;
+ u32 status;
/* Set the PHY address and the register address we want to read */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -99,9 +105,12 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
out_be32(&regs->miimcom, 0);
out_be32(&regs->miimcom, MII_READ_COMMAND);
- /* Wait for the transaction to finish */
- while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
- cpu_relax();
+ /* Wait for the transaction to finish, normally less than 100us */
+ status = spin_event_timeout(!(in_be32(&regs->miimind) &
+ (MIIMIND_NOTVALID | MIIMIND_BUSY)),
+ MII_TIMEOUT, 0);
+ if (!status)
+ return -ETIMEDOUT;
/* Grab the value of the register from miimstat */
value = in_be32(&regs->miimstat);
@@ -144,7 +153,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
- int timeout = PHY_INIT_TIMEOUT;
+ u32 status;
mutex_lock(&bus->mdio_lock);
@@ -155,12 +164,12 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
/* Wait until the bus is free */
- while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
- cpu_relax();
+ status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
+ MII_TIMEOUT, 0);
mutex_unlock(&bus->mdio_lock);
- if (timeout < 0) {
+ if (!status) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n",
bus->name);
return -EBUSY;
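The conversion above swaps unbounded cpu_relax() loops for spin_event_timeout(), a powerpc helper that spins until the condition holds or the microsecond timeout expires, and evaluates to the condition's final state. A sketch on a hypothetical register block (struct foo_regs and FOO_BUSY are made up; MII_TIMEOUT is the constant added above):

/* Sketch only; foo_regs/FOO_BUSY are hypothetical. */
struct foo_regs {
	u32 status;
};
#define FOO_BUSY 0x1

static int example_wait_not_busy(struct foo_regs __iomem *regs)
{
	u32 done;

	/* Poll for up to MII_TIMEOUT us, with no extra delay between reads */
	done = spin_event_timeout(!(in_be32(&regs->status) & FOO_BUSY),
				  MII_TIMEOUT, 0);

	return done ? 0 : -ETIMEDOUT;
}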
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 0741ade..4605f72 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,5 +1,4 @@
-/*
- * drivers/net/ethernet/freescale/gianfar.c
+/* drivers/net/ethernet/freescale/gianfar.c
*
* Gianfar Ethernet Driver
* This driver is designed for the non-CPM ethernet controllers
@@ -114,7 +113,7 @@ static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -266,8 +265,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
tx_queue->tx_bd_dma_base = addr;
tx_queue->dev = ndev;
/* enet DMA only understands physical addresses */
- addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
- vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
+ addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
}
/* Start the rx descriptor ring where the tx ring leaves off */
@@ -276,15 +275,16 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
rx_queue->dev = ndev;
- addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
- vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
+ addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
- tx_queue->tx_ring_size, GFP_KERNEL);
+ tx_queue->tx_ring_size,
+ GFP_KERNEL);
if (!tx_queue->tx_skbuff) {
netif_err(priv, ifup, ndev,
"Could not allocate tx_skbuff\n");
@@ -298,7 +298,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
- rx_queue->rx_ring_size, GFP_KERNEL);
+ rx_queue->rx_ring_size,
+ GFP_KERNEL);
if (!rx_queue->rx_skbuff) {
netif_err(priv, ifup, ndev,
@@ -327,15 +328,15 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
int i;
baddr = &regs->tbase0;
- for(i = 0; i < priv->num_tx_queues; i++) {
+ for (i = 0; i < priv->num_tx_queues; i++) {
gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
- baddr += 2;
+ baddr += 2;
}
baddr = &regs->rbase0;
- for(i = 0; i < priv->num_rx_queues; i++) {
+ for (i = 0; i < priv->num_rx_queues; i++) {
gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
- baddr += 2;
+ baddr += 2;
}
}
@@ -405,7 +406,8 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->attreli, attrs);
/* Start with defaults, and add stashing or locking
- * depending on the approprate variables */
+ * depending on the appropriate variables
+ */
attrs = ATTR_INIT_SETTINGS;
if (priv->bd_stash_en)
@@ -426,16 +428,16 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
unsigned long tx_packets = 0, tx_bytes = 0;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_packets += priv->rx_queue[i]->stats.rx_packets;
- rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
}
dev->stats.rx_packets = rx_packets;
- dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_bytes = rx_bytes;
dev->stats.rx_dropped = rx_dropped;
for (i = 0; i < priv->num_tx_queues; i++) {
@@ -443,7 +445,7 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
- dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_bytes = tx_bytes;
dev->stats.tx_packets = tx_packets;
return &dev->stats;
@@ -468,7 +470,7 @@ static const struct net_device_ops gfar_netdev_ops = {
void lock_rx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
spin_lock(&priv->rx_queue[i]->rxlock);
@@ -476,7 +478,7 @@ void lock_rx_qs(struct gfar_private *priv)
void lock_tx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
spin_lock(&priv->tx_queue[i]->txlock);
@@ -484,7 +486,7 @@ void lock_tx_qs(struct gfar_private *priv)
void unlock_rx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
spin_unlock(&priv->rx_queue[i]->rxlock);
@@ -492,7 +494,7 @@ void unlock_rx_qs(struct gfar_private *priv)
void unlock_tx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
spin_unlock(&priv->tx_queue[i]->txlock);
@@ -508,13 +510,13 @@ static bool gfar_is_vlan_on(struct gfar_private *priv)
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
return gfar_is_vlan_on(priv) ||
- (priv->ndev->features & NETIF_F_RXCSUM) ||
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
+ (priv->ndev->features & NETIF_F_RXCSUM) ||
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
kfree(priv->tx_queue[i]);
@@ -522,7 +524,7 @@ static void free_tx_pointers(struct gfar_private *priv)
static void free_rx_pointers(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
kfree(priv->rx_queue[i]);
@@ -530,7 +532,7 @@ static void free_rx_pointers(struct gfar_private *priv)
static void unmap_group_regs(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < MAXGROUPS; i++)
if (priv->gfargrp[i].regs)
@@ -539,7 +541,7 @@ static void unmap_group_regs(struct gfar_private *priv)
static void disable_napi(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++)
napi_disable(&priv->gfargrp[i].napi);
@@ -547,14 +549,14 @@ static void disable_napi(struct gfar_private *priv)
static void enable_napi(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++)
napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
- struct gfar_private *priv, const char *model)
+ struct gfar_private *priv, const char *model)
{
u32 *queue_mask;
@@ -580,15 +582,13 @@ static int gfar_parse_group(struct device_node *np,
priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
priv->gfargrp[priv->num_grps].priv = priv;
spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
- if(priv->mode == MQ_MG_MODE) {
- queue_mask = (u32 *)of_get_property(np,
- "fsl,rx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].rx_bit_map =
- queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
- queue_mask = (u32 *)of_get_property(np,
- "fsl,tx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].tx_bit_map =
- queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ if (priv->mode == MQ_MG_MODE) {
+ queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
+ *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
+ *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
} else {
priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
@@ -652,7 +652,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->num_rx_queues = num_rx_qs;
priv->num_grps = 0x0;
- /* Init Rx queue filer rule set linked list*/
+ /* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
priv->rx_list.count = 0;
mutex_init(&priv->rx_queue_access);
@@ -673,7 +673,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
} else {
priv->mode = SQ_SG_MODE;
err = gfar_parse_group(np, priv, model);
- if(err)
+ if (err)
goto err_grp_init;
}
@@ -730,27 +730,27 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
mac_addr = of_get_mac_address(np);
+
if (mac_addr)
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
- priv->device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+ priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
if (model && !strcasecmp(model, "eTSEC"))
- priv->device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR |
- FSL_GIANFAR_DEV_HAS_PADDING |
- FSL_GIANFAR_DEV_HAS_CSUM |
- FSL_GIANFAR_DEV_HAS_VLAN |
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
- FSL_GIANFAR_DEV_HAS_TIMER;
+ priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+ FSL_GIANFAR_DEV_HAS_PADDING |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -781,7 +781,7 @@ err_grp_init:
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+ struct ifreq *ifr, int cmd)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
@@ -851,6 +851,7 @@ static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
unsigned int new_bit_map = 0x0;
int mask = 0x1 << (max_qs - 1), i;
+
for (i = 0; i < max_qs; i++) {
if (bit_map & mask)
new_bit_map = new_bit_map + (1 << i);
@@ -936,22 +937,22 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* MPC8313 Rev 2.0 and higher; All MPC837x */
if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_74;
/* MPC8313 and MPC837x all rev */
if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_76;
/* MPC8313 and MPC837x all rev */
if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_A002;
/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
- (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+ (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
priv->errata |= GFAR_ERRATA_12;
if (priv->errata)
@@ -960,7 +961,8 @@ static void gfar_detect_errata(struct gfar_private *priv)
}
/* Set up the ethernet device structure, private data,
- * and anything else we need before we start */
+ * and anything else we need before we start
+ */
static int gfar_probe(struct platform_device *ofdev)
{
u32 tempval;
@@ -991,8 +993,9 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_detect_errata(priv);
- /* Stop the DMA engine now, in case it was running before */
- /* (The firmware could have used it, and left it running). */
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
+ */
gfar_halt(dev);
/* Reset MAC layer */
@@ -1026,13 +1029,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++)
- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+ GFAR_DEV_WEIGHT);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
@@ -1081,7 +1085,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv->padding = 0;
if (dev->features & NETIF_F_IP_CSUM ||
- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
@@ -1098,28 +1102,32 @@ static int gfar_probe(struct platform_device *ofdev)
/* Need to reverse the bit maps as bit_map's MSB is q0
* but, for_each_set_bit parses from right to left, which
- * basically reverses the queue numbers */
+ * basically reverses the queue numbers
+ */
for (i = 0; i< priv->num_grps; i++) {
- priv->gfargrp[i].tx_bit_map = reverse_bitmap(
- priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
- priv->gfargrp[i].rx_bit_map = reverse_bitmap(
- priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ priv->gfargrp[i].tx_bit_map =
+ reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+ priv->gfargrp[i].rx_bit_map =
+ reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
}
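
The comment above explains why the queue bit maps are mirrored before queues are assigned to groups; a minimal userspace re-statement of that reversal (plain C, not driver code, values made up) is:

#include <stdio.h>

/* Mirror a bit map over max_qs bits: the register's MSB refers to queue 0,
 * while for_each_set_bit() walks from the LSB, so bit (max_qs - 1 - i) of
 * the input becomes bit i of the output.
 */
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0, mask = 1u << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map |= 1u << i;
		mask >>= 1;
	}
	return new_bit_map;
}

int main(void)
{
	/* 0x80 over 8 queues selects only queue 0 -> becomes 0x01 */
	printf("0x%02x\n", reverse_bitmap(0x80, 8));
	return 0;
}
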
/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
- * also assign queues to groups */
+ * also assign queues to groups
+ */
for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+
for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
- priv->num_rx_queues) {
+ priv->num_rx_queues) {
priv->gfargrp[grp_idx].num_rx_queues++;
priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
}
priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+
for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
- priv->num_tx_queues) {
+ priv->num_tx_queues) {
priv->gfargrp[grp_idx].num_tx_queues++;
priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
tstat = tstat | (TSTAT_CLEAR_THALT >> i);
@@ -1149,7 +1157,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
- /* always enable rx filer*/
+ /* always enable rx filer */
priv->rx_filer_enable = 1;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -1165,7 +1173,8 @@ static int gfar_probe(struct platform_device *ofdev)
}
device_init_wakeup(&dev->dev,
- priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1189,13 +1198,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
- /* Even more device info helps when determining which kernel */
- /* provided which set of benchmarks. */
+ /* Even more device info helps when determining which kernel
+ * provided which set of benchmarks.
+ */
netdev_info(dev, "Running with NAPI enabled\n");
for (i = 0; i < priv->num_rx_queues; i++)
netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
i, priv->rx_queue[i]->rx_ring_size);
- for(i = 0; i < priv->num_tx_queues; i++)
+ for (i = 0; i < priv->num_tx_queues; i++)
netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
i, priv->tx_queue[i]->tx_ring_size);
@@ -1242,7 +1252,8 @@ static int gfar_suspend(struct device *dev)
u32 tempval;
int magic_packet = priv->wol_en &&
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
netif_device_detach(ndev);
@@ -1294,7 +1305,8 @@ static int gfar_resume(struct device *dev)
unsigned long flags;
u32 tempval;
int magic_packet = priv->wol_en &&
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
if (!netif_running(ndev)) {
netif_device_attach(ndev);
@@ -1393,13 +1405,13 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
}
if (ecntrl & ECNTRL_REDUCED_MODE) {
- if (ecntrl & ECNTRL_REDUCED_MII_MODE)
+ if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
return PHY_INTERFACE_MODE_RMII;
+ }
else {
phy_interface_t interface = priv->interface;
- /*
- * This isn't autodetected right now, so it must
+ /* This isn't autodetected right now, so it must
* be set by the device tree or platform code.
*/
if (interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -1453,8 +1465,7 @@ static int init_phy(struct net_device *dev)
return 0;
}
-/*
- * Initialize TBI PHY interface for communicating with the
+/* Initialize TBI PHY interface for communicating with the
* SERDES lynx PHY on the chip. We communicate with this PHY
* through the MDIO bus on each controller, treating it as a
* "normal" PHY at the address found in the TBIPA register. We assume
@@ -1479,8 +1490,7 @@ static void gfar_configure_serdes(struct net_device *dev)
return;
}
- /*
- * If the link is already up, we must already be ok, and don't need to
+ /* If the link is already up, we must already be ok, and don't need to
* configure and reset the TBI<->SerDes link. Maybe U-Boot configured
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
@@ -1492,18 +1502,19 @@ static void gfar_configure_serdes(struct net_device *dev)
phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
phy_write(tbiphy, MII_ADVERTISE,
- ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
- ADVERTISE_1000XPSE_ASYM);
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM);
- phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
- BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+ phy_write(tbiphy, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+ BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
@@ -1554,15 +1565,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
{
u32 res;
- /*
- * Normaly TSEC should not hang on GRS commands, so we should
+ /* Normally TSEC should not hang on GRS commands, so we should
* actually wait for IEVENT_GRSC flag.
*/
if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
return 0;
- /*
- * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+ /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
* the same as bits 23-30, the eTSEC Rx is assumed to be idle
* and the Rx can be safely reset.
*/
@@ -1580,7 +1589,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
u32 tempval;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
@@ -1594,8 +1603,8 @@ static void gfar_halt_nodisable(struct net_device *dev)
regs = priv->gfargrp[0].regs;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
- if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
- != (DMACTRL_GRS | DMACTRL_GTS)) {
+ if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
+ (DMACTRL_GRS | DMACTRL_GTS)) {
int ret;
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
@@ -1660,7 +1669,7 @@ void stop_gfar(struct net_device *dev)
} else {
for (i = 0; i < priv->num_grps; i++)
free_irq(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
}
free_skb_resources(priv);
@@ -1679,13 +1688,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
continue;
dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ txbdp->length, DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
- j++) {
+ j++) {
txbdp++;
dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ txbdp->length, DMA_TO_DEVICE);
}
txbdp++;
dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1705,8 +1714,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) {
if (rx_queue->rx_skbuff[i]) {
dma_unmap_single(&priv->ofdev->dev,
- rxbdp->bufPtr, priv->rx_buffer_size,
- DMA_FROM_DEVICE);
+ rxbdp->bufPtr, priv->rx_buffer_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
rx_queue->rx_skbuff[i] = NULL;
}
@@ -1718,7 +1727,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
}
/* If there are any tx skbs or rx skbs still around, free them.
- * Then free tx_skbuff and rx_skbuff */
+ * Then free tx_skbuff and rx_skbuff
+ */
static void free_skb_resources(struct gfar_private *priv)
{
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -1728,24 +1738,25 @@ static void free_skb_resources(struct gfar_private *priv)
/* Go through all the buffer descriptors and free their data buffers */
for (i = 0; i < priv->num_tx_queues; i++) {
struct netdev_queue *txq;
+
tx_queue = priv->tx_queue[i];
txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
- if(tx_queue->tx_skbuff)
+ if (tx_queue->tx_skbuff)
free_skb_tx_queue(tx_queue);
netdev_tx_reset_queue(txq);
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- if(rx_queue->rx_skbuff)
+ if (rx_queue->rx_skbuff)
free_skb_rx_queue(rx_queue);
}
dma_free_coherent(&priv->ofdev->dev,
- sizeof(struct txbd8) * priv->total_tx_ring_size +
- sizeof(struct rxbd8) * priv->total_rx_ring_size,
- priv->tx_queue[0]->tx_bd_base,
- priv->tx_queue[0]->tx_bd_dma_base);
+ sizeof(struct txbd8) * priv->total_tx_ring_size +
+ sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ priv->tx_queue[0]->tx_bd_base,
+ priv->tx_queue[0]->tx_bd_dma_base);
skb_queue_purge(&priv->rx_recycle);
}
@@ -1784,7 +1795,7 @@ void gfar_start(struct net_device *dev)
}
void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned long tx_mask, unsigned long rx_mask)
+ unsigned long tx_mask, unsigned long rx_mask)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 __iomem *baddr;
@@ -1794,28 +1805,26 @@ void gfar_configure_coalescing(struct gfar_private *priv,
* multiple queues, there's only single reg to program
*/
gfar_write(&regs->txic, 0);
- if(likely(priv->tx_queue[0]->txcoalescing))
+ if (likely(priv->tx_queue[0]->txcoalescing))
gfar_write(&regs->txic, priv->tx_queue[0]->txic);
gfar_write(&regs->rxic, 0);
- if(unlikely(priv->rx_queue[0]->rxcoalescing))
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
if (priv->mode == MQ_MG_MODE) {
baddr = &regs->txic0;
for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
- if (likely(priv->tx_queue[i]->txcoalescing)) {
- gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, 0);
+ if (likely(priv->tx_queue[i]->txcoalescing))
gfar_write(baddr + i, priv->tx_queue[i]->txic);
- }
}
baddr = &regs->rxic0;
for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
- if (likely(priv->rx_queue[i]->rxcoalescing)) {
- gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, 0);
+ if (likely(priv->rx_queue[i]->rxcoalescing))
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
- }
}
}
}
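
The reordered writes in the multi-group branch above amount to: always clear the per-queue coalescing register, then re-program it only if coalescing is still enabled for that queue, so a queue that had coalescing switched off no longer keeps a stale value. A small standalone sketch of that behaviour (hypothetical names, not driver code):

#include <stdio.h>

static void program_coalescing(unsigned int *reg, int enabled, unsigned int val)
{
	*reg = 0;                 /* unconditionally clear          */
	if (enabled)
		*reg = val;       /* then set only if still wanted  */
}

int main(void)
{
	unsigned int txic = 0xDEAD;          /* pretend stale register value */

	program_coalescing(&txic, 0, 0x42);
	printf("txic=0x%X\n", txic);         /* 0x0 after the change         */
	return 0;
}
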
@@ -1827,12 +1836,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
int err;
/* If the device has multiple interrupts, register for
- * them. Otherwise, only register for the one */
+ * them. Otherwise, only register for the one
+ */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
/* Install our interrupt handlers for Error,
- * Transmit, and Receive */
- if ((err = request_irq(grp->interruptError, gfar_error, 0,
- grp->int_name_er,grp)) < 0) {
+ * Transmit, and Receive
+ */
+ if ((err = request_irq(grp->interruptError, gfar_error,
+ 0, grp->int_name_er, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptError);
@@ -1840,21 +1851,21 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
}
if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
- 0, grp->int_name_tx, grp)) < 0) {
+ 0, grp->int_name_tx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptTransmit);
goto tx_irq_fail;
}
- if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
- grp->int_name_rx, grp)) < 0) {
+ if ((err = request_irq(grp->interruptReceive, gfar_receive,
+ 0, grp->int_name_rx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptReceive);
goto rx_irq_fail;
}
} else {
- if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
- grp->int_name_tx, grp)) < 0) {
+ if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
+ 0, grp->int_name_tx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptTransmit);
goto err_irq_fail;
@@ -1914,8 +1925,9 @@ irq_fail:
return err;
}
-/* Called when something needs to use the ethernet device */
-/* Returns 0 for success. */
+/* Called when something needs to use the ethernet device
+ * Returns 0 for success.
+ */
static int gfar_enet_open(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -1960,18 +1972,17 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
- int fcb_length)
+ int fcb_length)
{
- u8 flags = 0;
-
/* If we're here, it's an IP packet with a TCP or UDP
* payload. We set it to checksum, using a pseudo-header
* we provide
*/
- flags = TXFCB_DEFAULT;
+ u8 flags = TXFCB_DEFAULT;
- /* Tell the controller what the protocol is */
- /* And provide the already calculated phcs */
+ /* Tell the controller what the protocol is
+ * And provide the already calculated phcs
+ */
if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
flags |= TXFCB_UDP;
fcb->phcs = udp_hdr(skb)->check;
@@ -1981,7 +1992,8 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
/* l3os is the distance between the start of the
* frame (skb->data) and the start of the IP hdr.
* l4os is the distance between the start of the
- * l3 hdr and the l4 hdr */
+ * l3 hdr and the l4 hdr
+ */
fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
fcb->l4os = skb_network_header_len(skb);
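
A worked example of the two offsets set here, for a hypothetical untagged Ethernet + IPv4 frame with no hardware timestamping (so fcb_length covers only the FCB) -- a userspace sketch, not driver code:

#include <stdio.h>

int main(void)
{
	/* Assumed layout once the FCB has been pushed in front of the frame:
	 * [FCB (8)] [Ethernet header (14)] [IPv4 header (20)] [TCP ...]
	 */
	unsigned int fcb_length = 8;                   /* GMAC_FCB_LEN        */
	unsigned int network_offset = fcb_length + 14; /* skb_network_offset  */
	unsigned int ip_header_len = 20;               /* no IP options       */

	unsigned int l3os = network_offset - fcb_length; /* -> 14 */
	unsigned int l4os = ip_header_len;               /* -> 20 */

	printf("l3os=%u l4os=%u\n", l3os, l4os);
	return 0;
}
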
@@ -1995,7 +2007,7 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
- struct txbd8 *base, int ring_size)
+ struct txbd8 *base, int ring_size)
{
struct txbd8 *new_bd = bdp + stride;
@@ -2003,13 +2015,14 @@ static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
}
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
- int ring_size)
+ int ring_size)
{
return skip_txbd(bdp, 1, base, ring_size);
}
-/* This is called by the kernel when a frame is ready for transmission. */
-/* It is pointed to by the dev->hard_start_xmit function pointer */
+/* This is called by the kernel when a frame is ready for transmission.
+ * It is pointed to by the dev->hard_start_xmit function pointer
+ */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -2024,13 +2037,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
- /*
- * TOE=1 frames larger than 2500 bytes may see excess delays
+ /* TOE=1 frames larger than 2500 bytes may see excess delays
* before start of transmission.
*/
if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
- skb->ip_summed == CHECKSUM_PARTIAL &&
- skb->len > 2500)) {
+ skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb->len > 2500)) {
int ret;
ret = skb_checksum_help(skb);
@@ -2046,16 +2058,16 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* check if time stamp should be generated */
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->hwts_tx_en)) {
+ priv->hwts_tx_en)) {
do_tstamp = 1;
fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
}
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- vlan_tx_tag_present(skb) ||
- unlikely(do_tstamp)) &&
- (skb_headroom(skb) < fcb_length)) {
+ vlan_tx_tag_present(skb) ||
+ unlikely(do_tstamp)) &&
+ (skb_headroom(skb) < fcb_length)) {
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, fcb_length);
@@ -2065,10 +2077,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /* Steal sock reference for processing TX time stamps */
- swap(skb_new->sk, skb->sk);
- swap(skb_new->destructor, skb->destructor);
- kfree_skb(skb);
+ if (skb->sk)
+ skb_set_owner_w(skb_new, skb->sk);
+ consume_skb(skb);
skb = skb_new;
}
@@ -2099,12 +2110,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Time stamp insertion requires one additional TxBD */
if (unlikely(do_tstamp))
txbdp_tstamp = txbdp = next_txbd(txbdp, base,
- tx_queue->tx_ring_size);
+ tx_queue->tx_ring_size);
if (nr_frags == 0) {
if (unlikely(do_tstamp))
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
- TXBD_INTERRUPT);
+ TXBD_INTERRUPT);
else
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
@@ -2116,7 +2127,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
length = skb_shinfo(skb)->frags[i].size;
lstatus = txbdp->lstatus | length |
- BD_LFLAG(TXBD_READY);
+ BD_LFLAG(TXBD_READY);
/* Handle the last BD specially */
if (i == nr_frags - 1)
@@ -2146,8 +2157,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (CHECKSUM_PARTIAL == skb->ip_summed) {
fcb = gfar_add_fcb(skb);
/* as specified by errata */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
- && ((unsigned long)fcb % 0x20) > 0x18)) {
+ if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
+ ((unsigned long)fcb % 0x20) > 0x18)) {
__skb_pull(skb, GMAC_FCB_LEN);
skb_checksum_help(skb);
} else {
@@ -2175,10 +2186,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ skb_headlen(skb), DMA_TO_DEVICE);
- /*
- * If time stamping is requested one additional TxBD must be set up. The
+ /* If time stamping is requested one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
* GMAC_FCB_LEN. The second TxBD points to the actual frame data with
* the full frame length.
@@ -2186,7 +2196,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(do_tstamp)) {
txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - fcb_length);
+ (skb_headlen(skb) - fcb_length);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2194,8 +2204,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(txq, skb->len);
- /*
- * We can work in parallel with gfar_clean_tx_ring(), except
+ /* We can work in parallel with gfar_clean_tx_ring(), except
* when modifying num_txbdfree. Note that we didn't grab the lock
* when we were reading the num_txbdfree and checking for available
* space, that's because outside of this function it can only grow,
@@ -2208,8 +2217,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
spin_lock_irqsave(&tx_queue->txlock, flags);
- /*
- * The powerpc-specific eieio() is used, as wmb() has too strong
+ /* The powerpc-specific eieio() is used, as wmb() has too strong
* semantics (it requires synchronization between cacheable and
* uncacheable mappings, which eieio doesn't provide and which we
* don't need), thus requiring a more expensive sync instruction. At
@@ -2225,9 +2233,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
/* Update the current skb pointer to the next entry we will use
- * (wrapping if necessary) */
+ * (wrapping if necessary)
+ */
tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
- TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+ TX_RING_MOD_MASK(tx_queue->tx_ring_size);
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
@@ -2235,7 +2244,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->num_txbdfree -= (nr_txbds);
/* If the next BD still needs to be cleaned up, then the bds
- are full. We need to tell the kernel to stop sending us stuff. */
+ * are full. We need to tell the kernel to stop sending us stuff.
+ */
if (!tx_queue->num_txbdfree) {
netif_tx_stop_queue(txq);
@@ -2360,12 +2370,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
frame_size += priv->padding;
- tempsize =
- (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
+ tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
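
The rounding above bumps the needed buffer size to the next multiple of the increment; the same arithmetic with made-up numbers (a 512-byte increment is assumed only for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int inc = 512;          /* stand-in for INCREMENTAL_BUFFER_SIZE */
	unsigned int frame_size = 1538;  /* MTU + headers + padding, made up     */

	/* Mask off the low bits, then add one full increment. */
	unsigned int tempsize = (frame_size & ~(inc - 1)) + inc;

	printf("%u -> %u\n", frame_size, tempsize);  /* 1538 -> 2048 */
	return 0;
}
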
/* Only stop and start the controller if it isn't already
- * stopped, and we changed something */
+ * stopped, and we changed something
+ */
if ((oldsize != tempsize) && (dev->flags & IFF_UP))
stop_gfar(dev);
@@ -2378,11 +2388,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
/* If the mtu is larger than the max size for standard
* ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length */
+ * to allow huge frames, and to check the length
+ */
tempval = gfar_read(&regs->maccfg2);
if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
+ gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
else
tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2403,7 +2414,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
static void gfar_reset_task(struct work_struct *work)
{
struct gfar_private *priv = container_of(work, struct gfar_private,
- reset_task);
+ reset_task);
struct net_device *dev = priv->ndev;
if (dev->flags & IFF_UP) {
@@ -2430,7 +2441,7 @@ static void gfar_align_skb(struct sk_buff *skb)
* as many bytes as needed to align the data properly
*/
skb_reserve(skb, RXBUF_ALIGNMENT -
- (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+ (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
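
The skb_reserve() call above pads the data pointer forward to the next alignment boundary; the same arithmetic with made-up numbers (64-byte alignment assumed only for the example):

#include <stdio.h>

int main(void)
{
	unsigned long data = 0x12345682;  /* pretend skb->data            */
	unsigned long align = 64;         /* stand-in for RXBUF_ALIGNMENT */

	/* Bytes to skip so that (data + reserve) is a multiple of align. */
	unsigned long reserve = align - (data & (align - 1));

	printf("reserve %lu -> 0x%lx\n", reserve, data + reserve);
	return 0;
}
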
/* Interrupt Handler for Transmit complete */
@@ -2464,8 +2475,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
frags = skb_shinfo(skb)->nr_frags;
- /*
- * When time stamping, one additional TxBD must be freed.
+ /* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
@@ -2479,7 +2489,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
/* Only clean completed frames */
if ((lstatus & BD_LFLAG(TXBD_READY)) &&
- (lstatus & BD_LENGTH_MASK))
+ (lstatus & BD_LENGTH_MASK))
break;
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2489,11 +2499,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
buflen = bdp->length;
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
- buflen, DMA_TO_DEVICE);
+ buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
@@ -2506,23 +2517,20 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(&priv->ofdev->dev,
- bdp->bufPtr,
- bdp->length,
- DMA_TO_DEVICE);
+ dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+ bdp->length, DMA_TO_DEVICE);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
}
bytes_sent += skb->len;
- /*
- * If there's room in the queue (limit it to rx_buffer_size)
+ /* If there's room in the queue (limit it to rx_buffer_size)
* we add this skb back into the pool, if it's the right size
*/
if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
- skb_recycle_check(skb, priv->rx_buffer_size +
- RXBUF_ALIGNMENT)) {
+ skb_recycle_check(skb, priv->rx_buffer_size +
+ RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
skb_queue_head(&priv->rx_recycle, skb);
} else
@@ -2531,7 +2539,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
tx_queue->tx_skbuff[skb_dirtytx] = NULL;
skb_dirtytx = (skb_dirtytx + 1) &
- TX_RING_MOD_MASK(tx_ring_size);
+ TX_RING_MOD_MASK(tx_ring_size);
howmany++;
spin_lock_irqsave(&tx_queue->txlock, flags);
@@ -2561,8 +2569,7 @@ static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
__napi_schedule(&gfargrp->napi);
} else {
- /*
- * Clear IEVENT, so interrupts aren't called again
+ /* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived.
*/
gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
@@ -2579,7 +2586,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct net_device *dev = rx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
@@ -2590,7 +2597,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
gfar_init_rxbdp(rx_queue, bdp, buf);
}
-static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
+static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
@@ -2604,7 +2611,7 @@ static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
return skb;
}
-struct sk_buff * gfar_new_skb(struct net_device *dev)
+struct sk_buff *gfar_new_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
@@ -2622,8 +2629,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
- /* If the packet was truncated, none of the other errors
- * matter */
+ /* If the packet was truncated, none of the other errors matter */
if (status & RXBD_TRUNCATED) {
stats->rx_length_errors++;
@@ -2664,7 +2670,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
/* If valid headers were found, and valid sums
* were verified, then we tell the kernel that no
- * checksumming is necessary. Otherwise, it is */
+ * checksumming is necessary. Otherwise, it is [FIXME]
+ */
if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
@@ -2672,8 +2679,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
}
-/* gfar_process_frame() -- handle one incoming packet if skb
- * isn't NULL. */
+/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi)
{
@@ -2685,8 +2691,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* fcb is at the beginning if exists */
fcb = (struct rxfcb *)skb->data;
- /* Remove the FCB from the skb */
- /* Remove the padded bytes, if there are any */
+ /* Remove the FCB from the skb
+ * Remove the padded bytes, if there are any
+ */
if (amount_pull) {
skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
@@ -2696,6 +2703,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
if (priv->hwts_rx_en) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
u64 *ns = (u64 *) skb->data;
+
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(*ns);
}
@@ -2709,8 +2717,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev);
- /*
- * There's need to check for NETIF_F_HW_VLAN_RX here.
+ /* There's a need to check for NETIF_F_HW_VLAN_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set.
*/
@@ -2728,8 +2735,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
- * until the budget/quota has been reached. Returns the number
- * of frames handled
+ * until the budget/quota has been reached. Returns the number
+ * of frames handled
*/
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
@@ -2749,6 +2756,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
+
rmb();
/* Add another skb for the future */
@@ -2757,15 +2765,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
- priv->rx_buffer_size, DMA_FROM_DEVICE);
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
if (unlikely(!(bdp->status & RXBD_ERR) &&
- bdp->length > priv->rx_buffer_size))
+ bdp->length > priv->rx_buffer_size))
bdp->status = RXBD_LARGE;
/* We drop the frame if we failed to allocate a new buffer */
if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
- bdp->status & RXBD_ERR)) {
+ bdp->status & RXBD_ERR)) {
count_errors(bdp->status, dev);
if (unlikely(!newskb))
@@ -2784,7 +2792,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
rx_queue->stats.rx_bytes += pkt_len;
skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull,
- &rx_queue->grp->napi);
+ &rx_queue->grp->napi);
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2803,9 +2811,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
/* update to point at the next skb */
- rx_queue->skb_currx =
- (rx_queue->skb_currx + 1) &
- RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+ rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
+ RX_RING_MOD_MASK(rx_queue->rx_ring_size);
}
/* Update the current rxbd pointer to be the next one */
@@ -2816,8 +2823,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
static int gfar_poll(struct napi_struct *napi, int budget)
{
- struct gfar_priv_grp *gfargrp = container_of(napi,
- struct gfar_priv_grp, napi);
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi);
struct gfar_private *priv = gfargrp->priv;
struct gfar __iomem *regs = gfargrp->regs;
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -2831,11 +2838,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
budget_per_queue = budget/num_queues;
/* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived */
+ * because of the packets that have already arrived
+ */
gfar_write(&regs->ievent, IEVENT_RTX_MASK);
while (num_queues && left_over_budget) {
-
budget_per_queue = left_over_budget/num_queues;
left_over_budget = 0;
@@ -2846,12 +2853,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
tx_queue = priv->tx_queue[rx_queue->qindex];
tx_cleaned += gfar_clean_tx_ring(tx_queue);
- rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
- budget_per_queue);
+ rx_cleaned_per_queue =
+ gfar_clean_rx_ring(rx_queue, budget_per_queue);
rx_cleaned += rx_cleaned_per_queue;
- if(rx_cleaned_per_queue < budget_per_queue) {
+ if (rx_cleaned_per_queue < budget_per_queue) {
left_over_budget = left_over_budget +
- (budget_per_queue - rx_cleaned_per_queue);
+ (budget_per_queue -
+ rx_cleaned_per_queue);
set_bit(i, &serviced_queues);
num_queues--;
}
@@ -2869,25 +2877,25 @@ static int gfar_poll(struct napi_struct *napi, int budget)
gfar_write(&regs->imask, IMASK_DEFAULT);
- /* If we are coalescing interrupts, update the timer */
- /* Otherwise, clear it */
- gfar_configure_coalescing(priv,
- gfargrp->rx_bit_map, gfargrp->tx_bit_map);
+ /* If we are coalescing interrupts, update the timer
+ * Otherwise, clear it
+ */
+ gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+ gfargrp->tx_bit_map);
}
return rx_cleaned;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void gfar_netpoll(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- int i = 0;
+ int i;
/* If the device has multiple interrupts, run tx/rx */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -2896,7 +2904,7 @@ static void gfar_netpoll(struct net_device *dev)
disable_irq(priv->gfargrp[i].interruptReceive);
disable_irq(priv->gfargrp[i].interruptError);
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptError);
enable_irq(priv->gfargrp[i].interruptReceive);
enable_irq(priv->gfargrp[i].interruptTransmit);
@@ -2905,7 +2913,7 @@ static void gfar_netpoll(struct net_device *dev)
for (i = 0; i < priv->num_grps; i++) {
disable_irq(priv->gfargrp[i].interruptTransmit);
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptTransmit);
}
}
@@ -2957,7 +2965,8 @@ static void adjust_link(struct net_device *dev)
u32 ecntrl = gfar_read(&regs->ecntrl);
/* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
+ * If not, we operate in half-duplex mode.
+ */
if (phydev->duplex != priv->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
@@ -2983,7 +2992,8 @@ static void adjust_link(struct net_device *dev)
((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
/* Reduced mode distinguishes
- * between 10 and 100 */
+ * between 10 and 100
+ */
if (phydev->speed == SPEED_100)
ecntrl |= ECNTRL_R100;
else
@@ -3022,7 +3032,8 @@ static void adjust_link(struct net_device *dev)
/* Update the hash table based on the current list of multicast
* addresses we subscribe to. Also, change the promiscuity of
* the device based on the flags (this function is called
- * whenever dev->flags is changed */
+ * whenever dev->flags is changed
+ */
static void gfar_set_multi(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -3084,7 +3095,8 @@ static void gfar_set_multi(struct net_device *dev)
/* If we have extended hash tables, we need to
* clear the exact match registers to prepare for
- * setting them */
+ * setting them
+ */
if (priv->extended_hash) {
em_num = GFAR_EM_NUM + 1;
gfar_clear_exact_match(dev);
@@ -3110,13 +3122,14 @@ static void gfar_set_multi(struct net_device *dev)
/* Clears each of the exact match registers to zero, so they
- * don't interfere with normal reception */
+ * don't interfere with normal reception
+ */
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
+ for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
gfar_set_mac_for_addr(dev, idx, zero_arr);
}
@@ -3132,7 +3145,8 @@ static void gfar_clear_exact_match(struct net_device *dev)
* hash index which gaddr register to use, and the 5 other bits
* indicate which bit (assuming an IBM numbering scheme, which
* for PowerPC (tm) is usually the case) in the register holds
- * the entry. */
+ * the entry.
+ */
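
The comment describes splitting an 8-bit hash into a 3-bit register index and a 5-bit bit position, with the bit counted IBM-style from the MSB; a hedged userspace sketch of that split (the hash value is made up):

#include <stdio.h>

int main(void)
{
	unsigned char hash = 0xA7;            /* pretend CRC-derived hash index */

	unsigned int reg_idx = hash >> 5;     /* upper 3 bits: which gaddr reg  */
	unsigned int bit_ibm = hash & 0x1F;   /* lower 5 bits: which bit        */

	/* IBM numbering counts bit 0 as the MSB of the 32-bit register. */
	unsigned int bit_mask = 1u << (31 - bit_ibm);

	printf("gaddr%u |= 0x%08X\n", reg_idx, bit_mask);
	return 0;
}
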
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
@@ -3164,8 +3178,9 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
macptr += num*2;
- /* Now copy it into the mac registers backwards, cuz */
- /* little endian is silly */
+ /* Now copy it into the mac registers backwards, because
+ * little endian is silly
+ */
for (idx = 0; idx < ETH_ALEN; idx++)
tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
@@ -3197,7 +3212,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
/* Hmm... */
if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
- netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ netdev_dbg(dev,
+ "error interrupt (ievent=0x%08x imask=0x%08x)\n",
events, gfar_read(&regs->imask));
/* Update the error counters */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8a02557..8971921 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -46,18 +46,24 @@
#include "gianfar.h"
extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
+ int rx_work_limit);
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
- u64 * buf);
+ u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
-static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
-static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
-static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
-static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
-static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
"rx-dropped-by-kernel",
@@ -130,14 +136,15 @@ static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
else
memcpy(buf, stat_gstrings,
- GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+ GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}
/* Fill in an array of 64-bit statistics from various sources.
* This array will be appended to the end of the ethtool_stats
* structure, and returned to user space
*/
-static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf)
{
int i;
struct gfar_private *priv = netdev_priv(dev);
@@ -174,8 +181,8 @@ static int gfar_sset_count(struct net_device *dev, int sset)
}
/* Fills in the drvinfo structure with some basic info */
-static void gfar_gdrvinfo(struct net_device *dev, struct
- ethtool_drvinfo *drvinfo)
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
{
strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
@@ -226,7 +233,8 @@ static int gfar_reglen(struct net_device *dev)
}
/* Return a dump of the GFAR register space */
-static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *regbuf)
{
int i;
struct gfar_private *priv = netdev_priv(dev);
@@ -239,7 +247,8 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
/* Convert microseconds to ethernet clock ticks, which changes
* depending on what speed the controller is running at */
-static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
+ unsigned int usecs)
{
unsigned int count;
@@ -263,7 +272,8 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
}
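
The conversion being re-wrapped here is essentially a round-up division by the tick period for the current link speed; a sketch of that kind of conversion with an assumed 512 ns tick (an assumption for illustration, not the driver's exact per-speed constants):

#include <stdio.h>

int main(void)
{
	unsigned int ns_per_tick = 512;  /* assumed gigabit tick period */
	unsigned int usecs = 100;        /* requested coalescing delay  */

	/* Round up so the programmed delay is never shorter than asked. */
	unsigned int ticks = (usecs * 1000 + ns_per_tick - 1) / ns_per_tick;

	printf("%u us -> %u ticks\n", usecs, ticks);
	return 0;
}
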
/* Convert ethernet clock ticks to microseconds */
-static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
+ unsigned int ticks)
{
unsigned int count;
@@ -288,7 +298,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
/* Get the coalescing parameters, and put them in the cvals
* structure. */
-static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
@@ -353,7 +364,8 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
* Both cvals->*_usecs and cvals->*_frames have to be > 0
* in order for coalescing to be active
*/
-static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
int i = 0;
@@ -364,7 +376,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Set up rx coalescing */
/* As of now, we will enable/disable coalescing for all
* queues together in case of eTSEC2, this will be modified
- * along with the ethtool interface */
+ * along with the ethtool interface
+ */
if ((cvals->rx_coalesce_usecs == 0) ||
(cvals->rx_max_coalesced_frames == 0)) {
for (i = 0; i < priv->num_rx_queues; i++)
@@ -433,7 +446,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Fills in rvals with the current ring parameters. Currently,
* rx, rx_mini, and rx_jumbo rings are the same size, as mini and
* jumbo are ignored by the driver */
-static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -459,8 +473,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
/* Change the current ring parameters, stopping the controller if
* necessary so that we don't mess things up while we're in
* motion. We wait for the ring to be clean before reallocating
- * the rings. */
-static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+ * the rings.
+ */
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
int err = 0, i = 0;
@@ -486,7 +502,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
unsigned long flags;
/* Halt TX and RX, and process the frames which
- * have already been received */
+ * have already been received
+ */
local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -499,7 +516,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ priv->rx_queue[i]->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
@@ -509,7 +526,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
- priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+ priv->tx_queue[i]->num_txbdfree =
+ priv->tx_queue[i]->tx_ring_size;
}
/* Rebuild the rings with the new size */
@@ -535,7 +553,8 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
if (dev->flags & IFF_UP) {
/* Halt TX and RX, and process the frames which
- * have already been received */
+ * have already been received
+ */
local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -548,7 +567,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ priv->rx_queue[i]->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
@@ -564,12 +583,14 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
static uint32_t gfar_get_msglevel(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
+
return priv->msg_enable;
}
static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
struct gfar_private *priv = netdev_priv(dev);
+
priv->msg_enable = data;
}
@@ -614,14 +635,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L2DA) {
fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -630,7 +651,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_VLAN) {
fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
@@ -639,7 +660,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_IP_SRC) {
fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -648,7 +669,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & (RXH_IP_DST)) {
fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -657,7 +678,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L3_PROTO) {
fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -666,7 +687,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_0_1) {
fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -675,7 +696,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_2_3) {
fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -683,7 +704,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
}
}
-static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
+ u64 class)
{
unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
@@ -694,9 +716,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
int ret = 1;
local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!local_rqfpr || !local_rqfcr) {
pr_err("Out of memory\n");
ret = 0;
@@ -726,9 +748,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
local_rqfpr[j] = priv->ftp_rqfpr[i];
local_rqfcr[j] = priv->ftp_rqfcr[i];
j--;
- if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
- RQFCR_CLE |RQFCR_AND)) &&
- (priv->ftp_rqfpr[i] == cmp_rqfpr))
+ if ((priv->ftp_rqfcr[i] ==
+ (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
break;
}
@@ -743,12 +765,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
*/
for (l = i+1; l < MAX_FILER_IDX; l++) {
if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
- !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
- RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+ RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
priv->ftp_rqfpr[l] = FPR_FILER_MASK;
gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
- priv->ftp_rqfpr[l]);
+ priv->ftp_rqfpr[l]);
break;
}
@@ -773,7 +795,7 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
gfar_write_filer(priv, priv->cur_filer_idx,
- local_rqfcr[k], local_rqfpr[k]);
+ local_rqfcr[k], local_rqfpr[k]);
if (!priv->cur_filer_idx)
break;
priv->cur_filer_idx = priv->cur_filer_idx - 1;
@@ -785,7 +807,8 @@ err:
return ret;
}
-static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+static int gfar_set_hash_opts(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd)
{
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
@@ -810,10 +833,10 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
netdev_info(priv->ndev,
- "Receive Queue Filtering enabled\n");
+ "Receive Queue Filtering enabled\n");
} else {
netdev_warn(priv->ndev,
- "Receive Queue Filtering disabled\n");
+ "Receive Queue Filtering disabled\n");
return -EOPNOTSUPP;
}
}
@@ -823,16 +846,17 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
i &= RCTRL_PRSDEP_MASK;
if (i == RCTRL_PRSDEP_MASK) {
netdev_info(priv->ndev,
- "Receive Queue Filtering enabled\n");
+ "Receive Queue Filtering enabled\n");
} else {
netdev_warn(priv->ndev,
- "Receive Queue Filtering disabled\n");
+ "Receive Queue Filtering disabled\n");
return -EOPNOTSUPP;
}
}
/* Sets the properties for arbitrary filer rule
- * to the first 4 Layer 4 Bytes */
+ * to the first 4 Layer 4 Bytes
+ */
regs->rbifx = 0xC0C1C2C3;
return 0;
}
@@ -870,14 +894,14 @@ static void gfar_set_mask(u32 mask, struct filer_table *tab)
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
gfar_set_mask(mask, tab);
- tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
- | RQFCR_AND;
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
+ RQFCR_AND;
tab->fe[tab->index].prop = value;
tab->index++;
}
static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
- struct filer_table *tab)
+ struct filer_table *tab)
{
gfar_set_mask(mask, tab);
tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
@@ -885,8 +909,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
tab->index++;
}
-/*
- * For setting a tuple of value and mask of type flag
+/* For setting a tuple of value and mask of type flag
* Example:
* IP-Src = 10.0.0.0/255.0.0.0
* value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
@@ -901,7 +924,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
* Further, all masks are one-padded for better hardware efficiency.
*/
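
Reproducing the comment's example in plain C (userspace only; host byte order assumed for the filer's view of the value):

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	/* IP-Src = 10.0.0.0/255.0.0.0 from the comment above. */
	unsigned int value = ntohl(inet_addr("10.0.0.0"));   /* 0x0A000000 */
	unsigned int mask  = ntohl(inet_addr("255.0.0.0"));  /* 0xFF000000 */

	printf("value: 0x%08X mask: 0x%08X\n", value, mask);
	return 0;
}
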
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
- struct filer_table *tab)
+ struct filer_table *tab)
{
switch (flag) {
/* 3bit */
@@ -959,7 +982,8 @@ static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
- struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
+ struct ethtool_tcpip4_spec *mask,
+ struct filer_table *tab)
{
gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
@@ -970,97 +994,92 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
- struct ethtool_usrip4_spec *mask, struct filer_table *tab)
+ struct ethtool_usrip4_spec *mask,
+ struct filer_table *tab)
{
gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
- tab);
+ tab);
}
/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 upper_temp_mask = 0;
u32 lower_temp_mask = 0;
+
/* Source address */
if (!is_broadcast_ether_addr(mask->h_source)) {
-
if (is_zero_ether_addr(mask->h_source)) {
upper_temp_mask = 0xFFFFFFFF;
lower_temp_mask = 0xFFFFFFFF;
} else {
- upper_temp_mask = mask->h_source[0] << 16
- | mask->h_source[1] << 8
- | mask->h_source[2];
- lower_temp_mask = mask->h_source[3] << 16
- | mask->h_source[4] << 8
- | mask->h_source[5];
+ upper_temp_mask = mask->h_source[0] << 16 |
+ mask->h_source[1] << 8 |
+ mask->h_source[2];
+ lower_temp_mask = mask->h_source[3] << 16 |
+ mask->h_source[4] << 8 |
+ mask->h_source[5];
}
/* Upper 24bit */
- gfar_set_attribute(
- value->h_source[0] << 16 | value->h_source[1]
- << 8 | value->h_source[2],
- upper_temp_mask, RQFCR_PID_SAH, tab);
+ gfar_set_attribute(value->h_source[0] << 16 |
+ value->h_source[1] << 8 |
+ value->h_source[2],
+ upper_temp_mask, RQFCR_PID_SAH, tab);
/* And the same for the lower part */
- gfar_set_attribute(
- value->h_source[3] << 16 | value->h_source[4]
- << 8 | value->h_source[5],
- lower_temp_mask, RQFCR_PID_SAL, tab);
+ gfar_set_attribute(value->h_source[3] << 16 |
+ value->h_source[4] << 8 |
+ value->h_source[5],
+ lower_temp_mask, RQFCR_PID_SAL, tab);
}
/* Destination address */
if (!is_broadcast_ether_addr(mask->h_dest)) {
-
/* Special for destination is limited broadcast */
- if ((is_broadcast_ether_addr(value->h_dest)
- && is_zero_ether_addr(mask->h_dest))) {
+ if ((is_broadcast_ether_addr(value->h_dest) &&
+ is_zero_ether_addr(mask->h_dest))) {
gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
} else {
-
if (is_zero_ether_addr(mask->h_dest)) {
upper_temp_mask = 0xFFFFFFFF;
lower_temp_mask = 0xFFFFFFFF;
} else {
- upper_temp_mask = mask->h_dest[0] << 16
- | mask->h_dest[1] << 8
- | mask->h_dest[2];
- lower_temp_mask = mask->h_dest[3] << 16
- | mask->h_dest[4] << 8
- | mask->h_dest[5];
+ upper_temp_mask = mask->h_dest[0] << 16 |
+ mask->h_dest[1] << 8 |
+ mask->h_dest[2];
+ lower_temp_mask = mask->h_dest[3] << 16 |
+ mask->h_dest[4] << 8 |
+ mask->h_dest[5];
}
/* Upper 24bit */
- gfar_set_attribute(
- value->h_dest[0] << 16
- | value->h_dest[1] << 8
- | value->h_dest[2],
- upper_temp_mask, RQFCR_PID_DAH, tab);
+ gfar_set_attribute(value->h_dest[0] << 16 |
+ value->h_dest[1] << 8 |
+ value->h_dest[2],
+ upper_temp_mask, RQFCR_PID_DAH, tab);
/* And the same for the lower part */
- gfar_set_attribute(
- value->h_dest[3] << 16
- | value->h_dest[4] << 8
- | value->h_dest[5],
- lower_temp_mask, RQFCR_PID_DAL, tab);
+ gfar_set_attribute(value->h_dest[3] << 16 |
+ value->h_dest[4] << 8 |
+ value->h_dest[5],
+ lower_temp_mask, RQFCR_PID_DAL, tab);
}
}
gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
-
}
/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 vlan = 0, vlan_mask = 0;
u32 id = 0, id_mask = 0;
u32 cfi = 0, cfi_mask = 0;
u32 prio = 0, prio_mask = 0;
-
u32 old_index = tab->index;
/* Check if vlan is wanted */
@@ -1076,13 +1095,16 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
- prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
vlan |= RQFPR_CFI;
vlan_mask |= RQFPR_CFI;
- } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ } else if (cfi != VLAN_TAG_PRESENT &&
+ cfi_mask == VLAN_TAG_PRESENT) {
vlan_mask |= RQFPR_CFI;
}
}
@@ -1090,34 +1112,36 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
- RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+ RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
- &rule->m_u.tcp_ip4_spec, tab);
+ &rule->m_u.tcp_ip4_spec, tab);
break;
case UDP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
- RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+ RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
- &rule->m_u.udp_ip4_spec, tab);
+ &rule->m_u.udp_ip4_spec, tab);
break;
case SCTP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
- tab);
+ tab);
gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
- gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
- (struct ethtool_tcpip4_spec *) &rule->m_u, tab);
+ gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
+ (struct ethtool_tcpip4_spec *)&rule->m_u,
+ tab);
break;
case IP_USER_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
- tab);
+ tab);
gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
- (struct ethtool_usrip4_spec *) &rule->m_u, tab);
+ (struct ethtool_usrip4_spec *) &rule->m_u,
+ tab);
break;
case ETHER_FLOW:
if (vlan)
gfar_set_parse_bits(vlan, vlan_mask, tab);
gfar_set_ether((struct ethhdr *) &rule->h_u,
- (struct ethhdr *) &rule->m_u, tab);
+ (struct ethhdr *) &rule->m_u, tab);
break;
default:
return -1;
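
The VLAN handling above splits the 16-bit TCI word into priority, CFI and VID before feeding it to the parser bits. A stand-alone sketch of that split is below; the mask and shift values mirror <linux/if_vlan.h> and are redefined locally only so the example builds outside the kernel.

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_MASK  0xe000  /* priority code point, bits 15..13 */
#define VLAN_PRIO_SHIFT 13
#define VLAN_CFI_MASK   0x1000  /* canonical format indicator, bit 12 */
#define VLAN_VID_MASK   0x0fff  /* VLAN identifier, bits 11..0 */

int main(void)
{
        uint16_t tci = 0xb123;  /* example TCI: prio 5, CFI set, VID 0x123 */
        unsigned int prio = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        unsigned int cfi  = (tci & VLAN_CFI_MASK) ? 1 : 0;
        unsigned int vid  = tci & VLAN_VID_MASK;

        printf("prio %u cfi %u vid 0x%03x\n", prio, cfi, vid);
        return 0;
}
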
@@ -1152,7 +1176,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
}
- /* In rare cases the cache can be full while there is free space in hw */
+ /* In rare cases the cache can be full while there is
+ * free space in hw
+ */
if (tab->index > MAX_FILER_CACHE_IDX - 1)
return -EBUSY;
@@ -1161,7 +1187,7 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
+ struct gfar_filer_entry src[0], s32 size)
{
while (size > 0) {
size--;
@@ -1171,10 +1197,12 @@ static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
}
/* Delete the contents of the filer-table between start and end
- * and collapse them */
+ * and collapse them
+ */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
int length;
+
if (end > MAX_FILER_CACHE_IDX || end < begin)
return -EINVAL;
@@ -1200,14 +1228,14 @@ static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
+ struct filer_table *tab)
{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin
- > MAX_FILER_CACHE_IDX)
+ if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
+ begin > MAX_FILER_CACHE_IDX)
return -EINVAL;
gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
+ tab->index - length + 1);
tab->index += length;
return 0;
@@ -1215,9 +1243,10 @@ static int gfar_expand_filer_entries(u32 begin, u32 length,
static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
- == (RQFCR_AND | RQFCR_CLE))
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_AND | RQFCR_CLE))
return start;
}
return -1;
@@ -1225,16 +1254,16 @@ static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
- == (RQFCR_CLE))
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_CLE))
return start;
}
return -1;
}
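
gfar_get_next_cluster_start() and gfar_get_next_cluster_end() rely on the usual mask-then-compare idiom: masking with both flags and comparing against both detects "both set", while comparing against only RQFCR_CLE detects "CLE set, AND clear". A minimal sketch, with made-up flag values standing in for RQFCR_AND/RQFCR_CLE:

#include <assert.h>
#include <stdint.h>

#define FLAG_AND 0x08u  /* hypothetical stand-ins for RQFCR_AND / RQFCR_CLE */
#define FLAG_CLE 0x20u

static int is_cluster_start(uint32_t ctrl)
{
        /* true only when both flag bits are set */
        return (ctrl & (FLAG_AND | FLAG_CLE)) == (FLAG_AND | FLAG_CLE);
}

static int is_cluster_end(uint32_t ctrl)
{
        /* true only when CLE is set and AND is clear */
        return (ctrl & (FLAG_AND | FLAG_CLE)) == FLAG_CLE;
}

int main(void)
{
        assert(is_cluster_start(FLAG_AND | FLAG_CLE | 0x1));
        assert(!is_cluster_start(FLAG_CLE));
        assert(is_cluster_end(FLAG_CLE | 0x1));
        assert(!is_cluster_end(FLAG_AND | FLAG_CLE));
        return 0;
}
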
-/*
- * Uses hardwares clustering option to reduce
+/* Uses the hardware's clustering option to reduce
* the number of filer table entries
*/
static void gfar_cluster_filer(struct filer_table *tab)
@@ -1244,8 +1273,7 @@ static void gfar_cluster_filer(struct filer_table *tab)
while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
j = i;
while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /*
- * The cluster entries self and the previous one
+ /* The cluster entry itself and the previous one
* (a mask) must be identical!
*/
if (tab->fe[i].ctrl != tab->fe[j].ctrl)
@@ -1260,21 +1288,21 @@ static void gfar_cluster_filer(struct filer_table *tab)
jend = gfar_get_next_cluster_end(j, tab);
if (jend == -1 || iend == -1)
break;
- /*
- * First we make some free space, where our cluster
+
+ /* First we make some free space, where our cluster
* element should be. Then we copy it there and finally
* delete it from its old location.
*/
-
- if (gfar_expand_filer_entries(iend, (jend - j), tab)
- == -EINVAL)
+ if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
+ -EINVAL)
break;
gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
+ &(tab->fe[jend + 1]), jend - j);
if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j), tab) == -EINVAL)
+ jend + (jend - j),
+ tab) == -EINVAL)
return;
/* Mask out cluster bit */
@@ -1285,8 +1313,9 @@ static void gfar_cluster_filer(struct filer_table *tab)
/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
+ struct gfar_filer_entry *a2,
+ struct gfar_filer_entry *b1,
+ struct gfar_filer_entry *b2, u32 mask)
{
u32 temp[4];
temp[0] = a1->ctrl & mask;
@@ -1305,13 +1334,12 @@ static void gfar_swap_bits(struct gfar_filer_entry *a1,
b2->ctrl |= temp[2];
}
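
gfar_swap_bits() above exchanges only the bits selected by 'mask' between the paired control words. The same operation in isolation (plain C, outside the driver) looks like this:

#include <assert.h>
#include <stdint.h>

static void swap_masked_bits(uint32_t *a, uint32_t *b, uint32_t mask)
{
        uint32_t ta = *a & mask;
        uint32_t tb = *b & mask;

        /* keep the unmasked bits of each word, take the masked bits of the other */
        *a = (*a & ~mask) | tb;
        *b = (*b & ~mask) | ta;
}

int main(void)
{
        uint32_t a = 0x12345678, b = 0x9abcdef0;

        swap_masked_bits(&a, &b, 0x0000ffff);
        assert(a == 0x1234def0 && b == 0x9abc5678);
        return 0;
}
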
-/*
- * Generate a list consisting of masks values with their start and
+/* Generate a list consisting of mask values with their start and
* end of validity and block as indicator for parts belonging
* together (glued by ANDs) in mask_table
*/
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 i, and_index = 0, block_index = 1;
@@ -1327,13 +1355,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
and_index++;
}
/* cluster starts and ends will be separated because they should
- * hold their position */
+ * hold their position
+ */
if (tab->fe[i].ctrl & RQFCR_CLE)
block_index++;
/* An unset AND bit indicates the end of a dependent block */
if (!(tab->fe[i].ctrl & RQFCR_AND))
block_index++;
-
}
mask_table[and_index - 1].end = i - 1;
@@ -1341,14 +1369,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
return and_index;
}
-/*
- * Sorts the entries of mask_table by the values of the masks.
+/* Sorts the entries of mask_table by the values of the masks.
* Important: The 0xFF80 flags of the first and last entry of a
* block must hold their position (which queue, CLusterEnable, ReJEct,
* AND)
*/
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
+ struct filer_table *temp_table, u32 and_index)
{
/* Pointer to compare function (_asc or _desc) */
int (*gfar_comp)(const void *, const void *);
@@ -1359,16 +1386,16 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
gfar_comp = &gfar_comp_desc;
for (i = 0; i < and_index; i++) {
-
if (prev != mask_table[i].block) {
old_first = mask_table[start].start + 1;
old_last = mask_table[i - 1].end;
sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
+ sizeof(struct gfar_mask_entry),
+ gfar_comp, &gfar_swap);
/* Toggle order for every block. This makes the
- * thing more efficient! */
+ * thing more efficient!
+ */
if (gfar_comp == gfar_comp_desc)
gfar_comp = &gfar_comp_asc;
else
@@ -1378,12 +1405,11 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
new_last = mask_table[i - 1].end;
gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND
- );
+ &temp_table->fe[old_first],
+ &temp_table->fe[new_last],
+ &temp_table->fe[old_last],
+ RQFCR_QUEUE | RQFCR_CLE |
+ RQFCR_RJE | RQFCR_AND);
start = i;
size = 0;
@@ -1391,11 +1417,9 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
size++;
prev = mask_table[i].block;
}
-
}
-/*
- * Reduces the number of masks needed in the filer table to save entries
+/* Reduces the number of masks needed in the filer table to save entries
* This is done by sorting the masks of a dependent block. A dependent block is
* identified by gluing ANDs or CLE. The sorting order toggles after every
* block. Of course entries in scope of a mask must change their location with
@@ -1410,13 +1434,14 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
s32 ret = 0;
/* We need a copy of the filer table because
- * we want to change its order */
+ * we want to change its order
+ */
temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
if (temp_table == NULL)
return -ENOMEM;
mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
+ sizeof(struct gfar_mask_entry), GFP_KERNEL);
if (mask_table == NULL) {
ret = -ENOMEM;
@@ -1428,7 +1453,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
gfar_sort_mask_table(mask_table, temp_table, and_index);
/* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says */
+ * the real one in the order the mask table says
+ */
for (i = 0; i < and_index; i++) {
size = mask_table[i].end - mask_table[i].start + 1;
gfar_copy_filer_entries(&(tab->fe[j]),
@@ -1437,7 +1463,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
}
/* And finally we just have to check for duplicated masks and drop the
- * second ones */
+ * second ones
+ */
for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
if (tab->fe[i].ctrl == 0x80) {
previous_mask = i++;
@@ -1448,7 +1475,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
if (tab->fe[i].ctrl == 0x80) {
if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
/* Two identical ones found!
- * So drop the second one! */
+ * So drop the second one!
+ */
gfar_trim_filer_entries(i, i, tab);
} else
/* Not identical! */
@@ -1463,7 +1491,7 @@ end: kfree(temp_table);
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 i = 0;
if (tab->index > MAX_FILER_IDX - 1)
@@ -1473,13 +1501,15 @@ static int gfar_write_filer_table(struct gfar_private *priv,
lock_rx_qs(priv);
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++)
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+ i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-throughs */
for (; i < MAX_FILER_IDX - 1; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
- * because that's what people expect */
+ * because that's what people expect
+ */
gfar_write_filer(priv, i, 0x20, 0x0);
unlock_rx_qs(priv);
@@ -1488,21 +1518,21 @@ static int gfar_write_filer_table(struct gfar_private *priv,
}
static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
- struct gfar_private *priv)
+ struct gfar_private *priv)
{
if (flow->flow_type & FLOW_EXT) {
if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
netdev_warn(priv->ndev,
- "User-specific data not supported!\n");
+ "User-specific data not supported!\n");
if (~flow->m_ext.vlan_etype)
netdev_warn(priv->ndev,
- "VLAN-etype not supported!\n");
+ "VLAN-etype not supported!\n");
}
if (flow->flow_type == IP_USER_FLOW)
if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
netdev_warn(priv->ndev,
- "IP-Version differing from IPv4 not supported!\n");
+ "IP-Version differing from IPv4 not supported!\n");
return 0;
}
@@ -1520,15 +1550,18 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
return -ENOMEM;
/* Now convert the existing filer data from flow_spec into
- * filer tables binary format */
+ * filer tables binary format
+ */
list_for_each_entry(j, &priv->rx_list.list, list) {
ret = gfar_convert_to_filer(&j->fs, tab);
if (ret == -EBUSY) {
- netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ netdev_err(priv->ndev,
+ "Rule not added: No free space!\n");
goto end;
}
if (ret == -1) {
- netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n");
+ netdev_err(priv->ndev,
+ "Rule not added: Unsupported Flow-type!\n");
goto end;
}
}
@@ -1540,9 +1573,9 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
gfar_optimize_filer_masks(tab);
pr_debug("\n\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
+ "\tData on hardware: %d\n"
+ "\tCompression rate: %d%%\n",
+ tab->index, 100 - (100 * tab->index) / i);
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
@@ -1551,7 +1584,8 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
goto end;
}
-end: kfree(tab);
+end:
+ kfree(tab);
return ret;
}
@@ -1569,7 +1603,7 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
}
static int gfar_add_cls(struct gfar_private *priv,
- struct ethtool_rx_flow_spec *flow)
+ struct ethtool_rx_flow_spec *flow)
{
struct ethtool_flow_spec_container *temp, *comp;
int ret = 0;
@@ -1591,7 +1625,6 @@ static int gfar_add_cls(struct gfar_private *priv,
list_add(&temp->list, &priv->rx_list.list);
goto process;
} else {
-
list_for_each_entry(comp, &priv->rx_list.list, list) {
if (comp->fs.location > flow->location) {
list_add_tail(&temp->list, &comp->list);
@@ -1599,8 +1632,8 @@ static int gfar_add_cls(struct gfar_private *priv,
}
if (comp->fs.location == flow->location) {
netdev_err(priv->ndev,
- "Rule not added: ID %d not free!\n",
- flow->location);
+ "Rule not added: ID %d not free!\n",
+ flow->location);
ret = -EBUSY;
goto clean_mem;
}
@@ -1642,7 +1675,6 @@ static int gfar_del_cls(struct gfar_private *priv, u32 loc)
}
return ret;
-
}
static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
@@ -1663,7 +1695,7 @@ static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
}
static int gfar_get_cls_all(struct gfar_private *priv,
- struct ethtool_rxnfc *cmd, u32 *rule_locs)
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct ethtool_flow_spec_container *comp;
u32 i = 0;
@@ -1714,7 +1746,7 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
}
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct gfar_private *priv = netdev_priv(dev);
int ret = 0;
@@ -1748,23 +1780,19 @@ static int gfar_get_ts_info(struct net_device *dev,
struct gfar_private *priv = netdev_priv(dev);
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
- info->so_timestamping =
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->phc_index = -1;
return 0;
}
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = gfar_phc_index;
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
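
With get_ts_info wired up, userspace can query the reported capabilities through the ETHTOOL_GET_TS_INFO ioctl. A rough sketch follows; it assumes a kernel that already exposes struct ethtool_ts_info, and "eth0" is only a placeholder interface name.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder interface */
        ifr.ifr_data = (void *)&info;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("phc_index %d, so_timestamping 0x%x\n",
                       info.phc_index, info.so_timestamping);
        else
                perror("SIOCETHTOOL");
        close(fd);
        return 0;
}
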
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 9ac14f8..21c6574 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -185,7 +185,7 @@ static void mem_disp(u8 *addr, int size)
for (; (u32) i < (u32) addr + size4Aling; i += 4)
printk("%08x ", *((u32 *) (i)));
for (; (u32) i < (u32) addr + size; i++)
- printk("%02x", *((u8 *) (i)));
+ printk("%02x", *((i)));
if (notAlign == 1)
printk("\r\n");
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index d496673..3f4391b 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1217,7 +1217,7 @@ static int hp100_init_rxpdl(struct net_device *dev,
ringptr->pdl = pdlptr + 1;
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
/*
* Write address and length of first PDL Fragment (which is used for
@@ -1243,7 +1243,7 @@ static int hp100_init_txpdl(struct net_device *dev,
ringptr->pdl = pdlptr; /* +1; */
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
return roundup(MAX_TX_FRAG * 2 + 2, 4);
}
@@ -1628,7 +1628,7 @@ static void hp100_clean_txring(struct net_device *dev)
/* Conversion to new PCI API : NOP */
pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->txrhead->skb);
- lp->txrhead->skb = (void *) NULL;
+ lp->txrhead->skb = NULL;
lp->txrhead = lp->txrhead->next;
lp->txrcommit--;
}
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
index 6c2952c..3735bfa 100644
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -629,10 +629,10 @@ init_i596(struct net_device *dev) {
memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
lp->set_add.command = CmdIASetup;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);
+ i596_add_cmd(dev, &lp->set_add);
lp->tdr.command = CmdTDR;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);
+ i596_add_cmd(dev, &lp->tdr);
if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
return 1;
@@ -737,7 +737,7 @@ i596_cleanup_cmd(struct net_device *dev) {
lp = netdev_priv(dev);
while (lp->cmd_head) {
- cmd = (struct i596_cmd *)lp->cmd_head;
+ cmd = lp->cmd_head;
lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
lp->cmd_backlog--;
@@ -1281,7 +1281,7 @@ static void set_multicast_list(struct net_device *dev) {
lp->i596_config[8] |= 0x01;
}
- i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf);
+ i596_add_cmd(dev, &lp->set_conf);
}
}
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index cae17f4..353f57f6 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -571,7 +571,7 @@ static int init586(struct net_device *dev)
}
#endif
- ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+ ptr = alloc_rfa(dev,ptr); /* init receive-frame-area */
/*
* alloc xmit-buffs / init xmit_cmds
@@ -584,7 +584,7 @@ static int init586(struct net_device *dev)
ptr = (char *) ptr + XMIT_BUFF_SIZE;
p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
ptr = (char *) ptr + sizeof(struct tbd_struct);
- if((void *)ptr > (void *)dev->mem_end)
+ if(ptr > (void *)dev->mem_end)
{
printk("%s: not enough shared-mem for your configuration!\n",dev->name);
return 1;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 4fb47f1..cb66f57 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -376,9 +376,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
return 0;
}
-/**
- * allocates memory for a queue and registers pages in phyp
- */
+/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
int nr_pages, int wqe_size, int act_nr_sges,
struct ehea_adapter *adapter, int h_call_q_selector)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 79b07ec..0cafe4f 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -122,8 +122,10 @@ config IGB_DCA
config IGB_PTP
bool "PTP Hardware Clock (PHC)"
- default y
- depends on IGB && PTP_1588_CLOCK
+ default n
+ depends on IGB && EXPERIMENTAL
+ select PPS
+ select PTP_1588_CLOCK
---help---
Say Y here if you want to use PTP Hardware Clock (PHC) in the
driver. Only the basic clock operations have been implemented.
@@ -223,7 +225,9 @@ config IXGBE_DCB
config IXGBE_PTP
bool "PTP Clock Support"
default n
- depends on IXGBE && PTP_1588_CLOCK
+ depends on IXGBE && EXPERIMENTAL
+ select PPS
+ select PTP_1588_CLOCK
---help---
Say Y here if you want support for 1588 Timestamping with a
PHC device, using the PTP 1588 Clock support. This is
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c526279e4..3d68395 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -399,7 +399,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
}
/**
- * e1000_reset_hw: reset the hardware completely
+ * e1000_reset_hw - reset the hardware completely
* @hw: Struct containing variables accessed by shared code
*
* Reset the transmit and receive units; mask and clear all interrupts.
@@ -546,7 +546,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
}
/**
- * e1000_init_hw: Performs basic configuration of the adapter.
+ * e1000_init_hw - Performs basic configuration of the adapter.
* @hw: Struct containing variables accessed by shared code
*
* Assumes that the controller has previously been reset and is in a
@@ -2591,7 +2591,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
* @hw: Struct containing variables accessed by shared code
* @speed: Speed of the connection
* @duplex: Duplex setting of the connection
-
+ *
* Detects the current speed and duplex settings of the hardware.
*/
s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
@@ -2959,7 +2959,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
* @hw: Struct containing variables accessed by shared code
* @reg_addr: address of the PHY register to write
* @data: data to write to the PHY
-
+ *
* Writes a value to a PHY register
*/
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
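
The e1000_hw.c hunks above are pure kernel-doc fixes (a dash after the function name, '*' continuation lines). For reference, the expected shape of such a header is sketched below on a made-up helper; e1000_example_op is not a real driver function.

/**
 * e1000_example_op - one-line summary of what the helper does
 * @hw: pointer to the shared-code hardware struct (placeholder type here)
 *
 * Free-form description follows after a blank comment line, which is the
 * form the corrected comments above now use.
 */
static int e1000_example_op(void *hw)
{
        return hw ? 0 : -1;  /* body irrelevant; the comment shape is the point */
}
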
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7483ca0..3bfbb8d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -721,9 +721,7 @@ void e1000_reset(struct e1000_adapter *adapter)
e1000_release_manageability(adapter);
}
-/**
- * Dump the eeprom for users having checksum issues
- **/
+/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1078,18 +1076,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->features |= netdev->hw_features;
- netdev->hw_features |= NETIF_F_RXCSUM;
- netdev->hw_features |= NETIF_F_RXALL;
- netdev->hw_features |= NETIF_F_RXFCS;
+ netdev->hw_features |= (NETIF_F_RXCSUM |
+ NETIF_F_RXALL |
+ NETIF_F_RXFCS);
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_HW_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= (NETIF_F_TSO |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SG);
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -3056,14 +3054,13 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
mmiowb();
}
-/**
- * 82547 workaround to avoid controller hang in half-duplex environment.
+/* 82547 workaround to avoid controller hang in half-duplex environment.
* The workaround is to avoid queuing a large packet that would span
* the internal Tx FIFO ring boundary by notifying the stack to resend
* the packet at a later time. This gives the Tx FIFO an opportunity to
* flush all packets. When that occurs, we reset the Tx FIFO pointers
* to the beginning of the Tx FIFO.
- **/
+ */
#define E1000_FIFO_HDR 0x10
#define E1000_82547_PAD_LEN 0x3E0
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351a409..76edbc1 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -103,6 +103,7 @@
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d863075..905e214 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
- if (hw->phy.ops.check_reset_block(hw)) {
+ if (hw->phy.ops.check_reset_block &&
+ hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
* PHY loopback cannot be performed if SoL/IDER
* sessions are active
*/
- if (hw->phy.ops.check_reset_block(hw)) {
+ if (hw->phy.ops.check_reset_block &&
+ hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
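
Both ethtool hunks (and the mac.c, netdev.c and phy.c changes that follow) apply the same defensive pattern: check_reset_block may now be left unset, so the pointer is tested before it is called. A self-contained sketch of the idiom, using stand-in types rather than the driver's:

#include <stdio.h>
#include <stddef.h>

struct phy_ops {
        int (*check_reset_block)(void *hw);  /* may legitimately be NULL */
};

static int reset_is_blocked(struct phy_ops *ops, void *hw)
{
        /* an absent callback is treated as "not blocked" */
        return ops->check_reset_block && ops->check_reset_block(hw);
}

static int always_blocked(void *hw) { (void)hw; return 1; }

int main(void)
{
        struct phy_ops none = { .check_reset_block = NULL };
        struct phy_ops blk  = { .check_reset_block = always_blocked };

        printf("%d %d\n", reset_is_blocked(&none, NULL),
               reset_is_blocked(&blk, NULL));  /* prints "0 1" */
        return 0;
}
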
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 026e8b3..a134399 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
- if (hw->phy.ops.check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
return 0;
/*
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a4b0435..ca477e8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
* @sk_buff: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
- __le16 csum, struct sk_buff *skb)
+ struct sk_buff *skb)
{
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
if (status & E1000_RXD_STAT_IXSM)
return;
- /* TCP/UDP checksum error bit is set */
- if (errors & E1000_RXD_ERR_TCPE) {
+ /* TCP/UDP checksum error bit or IP checksum error bit is set */
+ if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
/* let the stack verify checksum errors */
adapter->hw_csum_err++;
return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
return;
/* It must be a TCP or UDP packet with a valid checksum */
- if (status & E1000_RXD_STAT_TCPCS) {
- /* TCP checksum is good */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- /*
- * IP fragment with UDP payload
- * Hardware complements the payload checksum, so we undo it
- * and then put the value in host order for further stack use.
- */
- __sum16 sum = (__force __sum16)swab16((__force u16)csum);
- skb->csum = csum_unfold(~sum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
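
The checksum helper above splits the descriptor's status_err word exactly as the surrounding context shows (status in the low 16 bits, error flags in bits 31:24) and now treats either TCPE or the newly defined IPE bit as a failed offload. A stand-alone illustration of that decode, with the two error bits copied from the driver defines:

#include <stdio.h>
#include <stdint.h>

#define RXD_ERR_TCPE 0x20  /* TCP/UDP checksum error */
#define RXD_ERR_IPE  0x40  /* IP checksum error */

static int csum_failed(uint32_t status_err)
{
        uint8_t errors = (uint8_t)(status_err >> 24);  /* error byte, bits 31:24 */

        return (errors & (RXD_ERR_TCPE | RXD_ERR_IPE)) != 0;
}

int main(void)
{
        printf("%d %d\n", csum_failed(0x20000003), csum_failed(0x00000003));
        /* prints "1 0": only the first descriptor reports a checksum error */
        return 0;
}
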
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
/* Receive Checksum Offload */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1341,8 +1328,7 @@ copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
}
}
- /* Receive Checksum Offload XXX recompute due to CRC strip? */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -2174,7 +2159,7 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter)
}
/**
- * @e1000_alloc_ring - allocate memory for a ring structure
+ * e1000_alloc_ring_dma - allocate memory for a ring structure
**/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
struct e1000_ring *ring)
@@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
- if (adapter->netdev->features & NETIF_F_RXCSUM) {
+ if (adapter->netdev->features & NETIF_F_RXCSUM)
rxcsum |= E1000_RXCSUM_TUOFL;
-
- /*
- * IPv4 payload checksum for UDP fragments must be
- * used in conjunction with packet-split.
- */
- if (adapter->rx_ps_pages)
- rxcsum |= E1000_RXCSUM_IPPCSE;
- } else {
+ else
rxcsum &= ~E1000_RXCSUM_TUOFL;
- /* no need to clear IPPCSE as it defaults to 0 */
- }
ew32(RXCSUM, rxcsum);
if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
- if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
- if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- e_err("Jumbo Frames not supported.\n");
- return -EINVAL;
- }
-
- /*
- * IP payload checksum (enabled with jumbos/packet-split when
- * Rx checksum is enabled) and generation of RSS hash is
- * mutually exclusive in the hardware.
- */
- if ((netdev->features & NETIF_F_RXCSUM) &&
- (netdev->features & NETIF_F_RXHASH)) {
- e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
- return -EINVAL;
- }
+ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
+ return -EINVAL;
}
/* Supported frame sizes */
@@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev,
NETIF_F_RXALL)))
return 0;
- /*
- * IP payload checksum (enabled with jumbos/packet-split when Rx
- * checksum is enabled) and generation of RSS hash is mutually
- * exclusive in the hardware.
- */
- if (adapter->rx_ps_pages &&
- (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
- e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
- return -EINVAL;
- }
-
if (changed & NETIF_F_RXFCS) {
if (features & NETIF_F_RXFCS) {
adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
@@ -6237,8 +6190,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.phy.ms_type = e1000_ms_hw_default;
}
- if (hw->phy.ops.check_reset_block(hw))
- e_info("PHY reset is blocked due to SOL/IDER session.\n");
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+ dev_info(&pdev->dev,
+ "PHY reset is blocked due to SOL/IDER session.\n");
/* Set initial default active device features */
netdev->features = (NETIF_F_SG |
@@ -6288,7 +6242,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
break;
if (i == 2) {
- e_err("The NVM Checksum Is Not Valid\n");
+ dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -6298,13 +6252,15 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* copy the MAC address */
if (e1000e_read_mac_addr(&adapter->hw))
- e_err("NVM Read Error while reading MAC address\n");
+ dev_err(&pdev->dev,
+ "NVM Read Error while reading MAC address\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->perm_addr);
err = -EIO;
goto err_eeprom;
}
@@ -6404,7 +6360,7 @@ err_register:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
- if (!hw->phy.ops.check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 55cc1565b..dfbfa7f 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -199,16 +199,19 @@ static int __devinit e1000_validate_option(unsigned int *value,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- e_info("%s Enabled\n", opt->name);
+ dev_info(&adapter->pdev->dev, "%s Enabled\n",
+ opt->name);
return 0;
case OPTION_DISABLED:
- e_info("%s Disabled\n", opt->name);
+ dev_info(&adapter->pdev->dev, "%s Disabled\n",
+ opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- e_info("%s set to %i\n", opt->name, *value);
+ dev_info(&adapter->pdev->dev, "%s set to %i\n",
+ opt->name, *value);
return 0;
}
break;
@@ -220,7 +223,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- e_info("%s\n", ent->str);
+ dev_info(&adapter->pdev->dev, "%s\n",
+ ent->str);
return 0;
}
}
@@ -230,8 +234,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
BUG();
}
- e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
- opt->err);
+ dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -251,8 +255,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- e_notice("Warning: no configuration for board #%i\n", bd);
- e_notice("Using defaults for all values\n");
+ dev_notice(&adapter->pdev->dev,
+ "Warning: no configuration for board #%i\n", bd);
+ dev_notice(&adapter->pdev->dev,
+ "Using defaults for all values\n");
}
{ /* Transmit Interrupt Delay */
@@ -366,27 +372,32 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
* default values
*/
if (adapter->itr > 4)
- e_info("%s set to default %d\n", opt.name,
- adapter->itr);
+ dev_info(&adapter->pdev->dev,
+ "%s set to default %d\n", opt.name,
+ adapter->itr);
}
adapter->itr_setting = adapter->itr;
switch (adapter->itr) {
case 0:
- e_info("%s turned off\n", opt.name);
+ dev_info(&adapter->pdev->dev, "%s turned off\n",
+ opt.name);
break;
case 1:
- e_info("%s set to dynamic mode\n", opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic mode\n", opt.name);
adapter->itr = 20000;
break;
case 3:
- e_info("%s set to dynamic conservative mode\n",
- opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
adapter->itr = 20000;
break;
case 4:
- e_info("%s set to simplified (2000-8000 ints) mode\n",
- opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to simplified (2000-8000 ints) mode\n",
+ opt.name);
break;
default:
/*
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0334d01..b860d4f 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
s32 ret_val;
u32 ctrl;
- ret_val = phy->ops.check_reset_block(hw);
- if (ret_val)
- return 0;
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return 0;
+ }
ret_val = phy->ops.acquire(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index e650839..5e84eaa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -206,8 +206,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
break;
case e1000_i350:
- case e1000_i210:
- case e1000_i211:
mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break;
default:
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 35d1e4f..10efcd8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -117,6 +117,7 @@
/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
/* Split and Replication RX Control - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ae6d3f3..9e572dd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -65,19 +65,30 @@ struct igb_adapter;
#define MAX_Q_VECTORS 8
/* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \
- (hw->mac.type > e1000_82575 ? 8 : 4)))
-#define IGB_MAX_RX_QUEUES_I210 4
+#define IGB_MAX_RX_QUEUES 8
+#define IGB_MAX_RX_QUEUES_82575 4
#define IGB_MAX_RX_QUEUES_I211 2
-#define IGB_MAX_TX_QUEUES 16
-#define IGB_MAX_TX_QUEUES_I210 4
-#define IGB_MAX_TX_QUEUES_I211 2
+#define IGB_MAX_TX_QUEUES 8
#define IGB_MAX_VF_MC_ENTRIES 30
#define IGB_MAX_VF_FUNCTIONS 8
#define IGB_MAX_VFTA_ENTRIES 128
#define IGB_82576_VF_DEV_ID 0x10CA
#define IGB_I350_VF_DEV_ID 0x1520
+/* NVM version defines */
+#define IGB_MAJOR_MASK 0xF000
+#define IGB_MINOR_MASK 0x0FF0
+#define IGB_BUILD_MASK 0x000F
+#define IGB_COMB_VER_MASK 0x00FF
+#define IGB_MAJOR_SHIFT 12
+#define IGB_MINOR_SHIFT 4
+#define IGB_COMB_VER_SHFT 8
+#define IGB_NVM_VER_INVALID 0xFFFF
+#define IGB_ETRACK_SHIFT 16
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -371,6 +382,7 @@ struct igb_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ char fw_version[32];
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -420,6 +432,7 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
+extern void igb_set_fw_version(struct igb_adapter *);
#ifdef CONFIG_IGB_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_remove(struct igb_adapter *adapter);
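
The new IGB_*_MASK/SHIFT values split the 16-bit NVM word that igb_set_fw_version() reads into the version string reported by ethtool -i. A quick numeric illustration (the example word is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define IGB_MAJOR_MASK  0xF000
#define IGB_MINOR_MASK  0x0FF0
#define IGB_BUILD_MASK  0x000F
#define IGB_MAJOR_SHIFT 12
#define IGB_MINOR_SHIFT 4

int main(void)
{
        uint16_t fw = 0x1083;  /* arbitrary example word */

        /* same "%d.%d%d" layout the driver prints: here "1.83" */
        printf("%d.%d%d\n",
               (fw & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
               (fw & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
               fw & IGB_BUILD_MASK);
        return 0;
}
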
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 812d4f9..a19c84c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -710,6 +710,7 @@ static int igb_set_eeprom(struct net_device *netdev,
if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
hw->nvm.ops.update(hw);
+ igb_set_fw_version(adapter);
kfree(eeprom_buff);
return ret_val;
}
@@ -718,20 +719,16 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- u16 eeprom_data;
strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
- /* EEPROM image version # is reported as firmware version # for
- * 82575 controllers */
- adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d-%d",
- (eeprom_data & 0xF000) >> 12,
- (eeprom_data & 0x0FF0) >> 4,
- eeprom_data & 0x000F);
-
+ /*
+ * EEPROM image version # is reported as firmware version # for
+ * 82575 controllers
+ */
+ strlcpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = IGB_STATS_LEN;
@@ -2271,6 +2268,38 @@ static void igb_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
+#ifdef CONFIG_IGB_PTP
+static int igb_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL) |
+ (1 << HWTSTAMP_FILTER_SOME) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+#endif
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2299,6 +2328,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_coalesce = igb_set_coalesce,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
+#ifdef CONFIG_IGB_PTP
+ .get_ts_info = igb_ethtool_get_ts_info,
+#endif
};
void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index dd3bfe8..60e3075 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -59,9 +59,9 @@
#endif
#include "igb.h"
-#define MAJ 3
-#define MIN 4
-#define BUILD 7
+#define MAJ 4
+#define MIN 0
+#define BUILD 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -1048,11 +1048,6 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
numvecs += adapter->num_tx_queues;
- /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
- if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i211))
- numvecs = 4;
-
/* store the number of vectors reserved for queues */
adapter->num_q_vectors = numvecs;
@@ -1821,6 +1816,69 @@ static const struct net_device_ops igb_netdev_ops = {
};
/**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
+ *
+ **/
+void igb_set_fw_version(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
+ u16 major, build, patch, fw_version;
+ u32 etrack_id;
+
+ hw->nvm.ops.read(hw, 5, 1, &fw_version);
+ if (adapter->hw.mac.type != e1000_i211) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
+ etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
+
+ /* combo image version needs to be found */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+ (comb_offset != IGB_NVM_VER_INVALID)) {
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* Only display Option Rom if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != IGB_NVM_VER_INVALID) &&
+ (comb_verl != IGB_NVM_VER_INVALID))) {
+ major = comb_verl >> IGB_COMB_VER_SHFT;
+ build = (comb_verl << IGB_COMB_VER_SHFT) |
+ (comb_verh >> IGB_COMB_VER_SHFT);
+ patch = comb_verh & IGB_COMB_VER_MASK;
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d%d, 0x%08x, %d.%d.%d",
+ (fw_version & IGB_MAJOR_MASK) >>
+ IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >>
+ IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK),
+ etrack_id, major, build, patch);
+ goto out;
+ }
+ }
+ snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+ "%d.%d%d, 0x%08x",
+ (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK), etrack_id);
+ } else {
+ snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+ "%d.%d%d",
+ (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK));
+ }
+out:
+ return;
+}
+
+/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in igb_pci_tbl
@@ -2030,6 +2088,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
goto err_eeprom;
}
+ /* get firmware version for ethtool -i */
+ igb_set_fw_version(adapter);
+
setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2338,6 +2399,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u32 max_rss_queues;
pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
@@ -2370,40 +2432,69 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
} else
adapter->vfs_allocated_count = max_vfs;
break;
- case e1000_i210:
- case e1000_i211:
- adapter->vfs_allocated_count = 0;
- break;
default:
break;
}
#endif /* CONFIG_PCI_IOV */
+
+ /* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
+ case e1000_i211:
+ max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
case e1000_i210:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210,
- num_online_cpus());
+ max_rss_queues = IGB_MAX_RX_QUEUES_82575;
break;
+ case e1000_i350:
+ /* I350 cannot do RSS and SR-IOV at the same time */
+ if (!!adapter->vfs_allocated_count) {
+ max_rss_queues = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (!!adapter->vfs_allocated_count) {
+ max_rss_queues = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ default:
+ max_rss_queues = IGB_MAX_RX_QUEUES;
+ break;
+ }
+
+ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+ /* Determine if we need to pair queues. */
+ switch (hw->mac.type) {
+ case e1000_82575:
case e1000_i211:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211,
- num_online_cpus());
+ /* Device supports enough interrupts without queue pairing. */
break;
+ case e1000_82576:
+ /*
+ * If VFs are going to be allocated with RSS queues then we
+ * should pair the queues in order to conserve interrupts due
+ * to limited supply.
+ */
+ if ((adapter->rss_queues > 1) &&
+ (adapter->vfs_allocated_count > 6))
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ /* fall through */
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
default:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES,
- num_online_cpus());
+ /*
+ * If rss_queues > half of max_rss_queues, pair the queues in
+ * order to conserve interrupts due to limited supply.
+ */
+ if (adapter->rss_queues > (max_rss_queues / 2))
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
break;
}
- /* i350 cannot do RSS and SR-IOV at the same time */
- if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
- adapter->rss_queues = 1;
-
- /*
- * if rss_queues > 4 or vfs are going to be allocated with rss_queues
- * then we should combine the queues into a queue pair in order to
- * conserve interrupts due to limited supply
- */
- if ((adapter->rss_queues > 4) ||
- ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
/* Setup and initialize a copy of the hw vlan table array */
adapter->shadow_vfta = kzalloc(sizeof(u32) *
@@ -5686,6 +5777,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
+ *
* returns true if ring is completely cleaned
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
@@ -6997,6 +7089,11 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
}
wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
+ */
+ wr32(E1000_RTTBCNRM, 0x14);
wr32(E1000_RTTBCNRC, bcnrc_val);
}
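
The reworked igb_sw_init() first derives max_rss_queues per MAC type and then sets IGB_FLAG_QUEUE_PAIRS once more than half of those queues are actually in use, so Tx/Rx pairs share MSI-X vectors. The threshold rule in isolation (numbers are examples only):

#include <stdio.h>

static int needs_queue_pairs(unsigned int rss_queues, unsigned int max_rss_queues)
{
        /* pair queues once more than half of the supported queues are used */
        return rss_queues > (max_rss_queues / 2);
}

int main(void)
{
        /* e.g. a device with 8 supported RSS queues: pairing starts at 5 */
        printf("%d %d\n", needs_queue_pairs(4, 8), needs_queue_pairs(5, 8));
        return 0;
}
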
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index d5ee7fa..c846ea9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -330,7 +330,17 @@ void igb_ptp_init(struct igb_adapter *adapter)
void igb_ptp_remove(struct igb_adapter *adapter)
{
- cancel_delayed_work_sync(&adapter->overflow_work);
+ switch (adapter->hw.mac.type) {
+ case e1000_i211:
+ case e1000_i210:
+ case e1000_i350:
+ case e1000_82580:
+ case e1000_82576:
+ cancel_delayed_work_sync(&adapter->overflow_work);
+ break;
+ default:
+ return;
+ }
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 8ce6706..90eef07 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
- (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
- (ec->rx_coalesce_usecs == 2))
- return -EINVAL;
-
- /* convert to rate of irq's per second */
- if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+ if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+ (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+ adapter->current_itr = ec->rx_coalesce_usecs << 2;
+ adapter->requested_itr = 1000000000 /
+ (adapter->current_itr * 256);
+ } else if ((ec->rx_coalesce_usecs == 3) ||
+ (ec->rx_coalesce_usecs == 2)) {
adapter->current_itr = IGBVF_START_ITR;
adapter->requested_itr = ec->rx_coalesce_usecs;
- } else {
- adapter->current_itr = ec->rx_coalesce_usecs << 2;
+ } else if (ec->rx_coalesce_usecs == 0) {
+ /*
+ * The user's desire is to turn off interrupt throttling
+ * altogether, but due to HW limitations, we can't do that.
+ * Instead we set a very small value in EITR, which would
+ * allow ~967k interrupts per second, but allow the adapter's
+ * internal clocking to still function properly.
+ */
+ adapter->current_itr = 4;
adapter->requested_itr = 1000000000 /
(adapter->current_itr * 256);
- }
+ } else
+ return -EINVAL;
writel(adapter->current_itr,
hw->hw_addr + adapter->rx_ring->itr_register);
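
The coalescing rework above keeps the same arithmetic for the normal range: the requested microsecond value is scaled by four into current_itr and the advertised interrupt rate follows from a 256 ns time base. A purely numerical illustration of that conversion (not an authoritative statement about the EITR register):

#include <stdio.h>

int main(void)
{
        unsigned int usecs = 100;                  /* ethtool rx-usecs */
        unsigned int current_itr = usecs << 2;     /* value written to the ring's ITR */
        unsigned int irqs_per_sec = 1000000000u / (current_itr * 256);

        printf("itr %u -> about %u interrupts/s\n", current_itr, irqs_per_sec);
        /* prints "itr 400 -> about 9765 interrupts/s" */
        return 0;
}
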
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 8ec74b0..0696abf 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -766,6 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
/**
* igbvf_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
+ *
* returns true if ring is completely cleaned
**/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 30a6cc4..eea0e10 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -283,7 +283,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
return err;
}
-/** e1000_rlpml_set_vf - Set the maximum receive packet length
+/**
+ * e1000_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
@@ -302,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
* e1000_rar_set_vf - set device MAC address
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
- * @index receive address array register
+ * @index: receive address array register
**/
static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
{
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5fce363..aab649f 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -2276,9 +2276,9 @@ static void ixgb_netpoll(struct net_device *dev)
#endif
/**
- * ixgb_io_error_detected() - called when PCI error is detected
- * @pdev pointer to pci device with error
- * @state pci channel state after error
+ * ixgb_io_error_detected - called when PCI error is detected
+ * @pdev: pointer to pci device with error
+ * @state: pci channel state after error
*
* This callback is called by the PCI subsystem whenever
* a PCI bus error is detected.
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 0bdf06b..5fd5d04 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
-
+ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 3ef3c52..2ffdc8f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -196,7 +196,7 @@ enum ixgbe_ring_state_t {
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED,
__IXGBE_RX_CSUM_UDP_ZERO_ERR,
- __IXGBE_RX_FCOE_BUFSZ,
+ __IXGBE_RX_FCOE,
};
#define check_for_tx_hang(ring) \
@@ -278,8 +278,10 @@ enum ixgbe_ring_f_enum {
#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature {
- int indices;
- int mask;
+ u16 limit; /* upper limit on feature indices */
+ u16 indices; /* current value of indices */
+ u16 mask; /* Mask used for feature to ring mapping */
+ u16 offset; /* offset to start of feature */
} ____cacheline_internodealigned_in_smp;
/*
@@ -290,7 +292,7 @@ struct ixgbe_ring_feature {
#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
- return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+ return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
}
#else
#define ixgbe_rx_pg_order(_ring) 0
@@ -315,7 +317,7 @@ struct ixgbe_ring_container {
? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
-/* MAX_MSIX_Q_VECTORS of these are allocated,
+/* MAX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbe_q_vector {
@@ -401,11 +403,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_VECTORS_82599 64
-#define MAX_MSIX_Q_VECTORS_82599 64
+#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
-#define MAX_MSIX_Q_VECTORS_82598 16
+#define MAX_Q_VECTORS_82598 16
-#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
+#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
#define MIN_MSIX_Q_VECTORS 1
@@ -496,7 +498,7 @@ struct ixgbe_adapter {
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
- struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
/* DCB parameters */
struct ieee_pfc *ixgbe_ieee_pfc;
@@ -507,8 +509,8 @@ struct ixgbe_adapter {
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
- int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
+ int num_q_vectors; /* current number of q_vectors for device */
+ int max_q_vectors; /* true count of q_vectors for device */
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
@@ -561,6 +563,7 @@ struct ixgbe_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ int rx_hwtstamp_filter;
u32 base_incval;
u32 cycle_speed;
#endif /* CONFIG_IXGBE_PTP */
@@ -718,6 +721,7 @@ extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb);
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
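
The ixgbe.h changes above split the ring-feature descriptor into limit, indices, mask, and offset, so a feature records how many rings it may use, how many it was actually granted, and where those rings start in the global ring array. A small hedged sketch of how those fields relate; the struct and numbers below are simplified stand-ins, not the driver's own definitions:

#include <stdio.h>

/* Simplified stand-in for the reworked ring-feature descriptor:
 *   limit   - configured upper bound on rings for the feature
 *   indices - rings actually granted (e.g. capped by CPU count)
 *   mask    - mask used for feature-to-ring register mapping
 *   offset  - first global ring index owned by the feature
 */
struct ring_feature {
	unsigned short limit;
	unsigned short indices;
	unsigned short mask;
	unsigned short offset;
};

static unsigned short min_u16(unsigned short a, unsigned short b)
{
	return a < b ? a : b;
}

int main(void)
{
	struct ring_feature fcoe = { .limit = 8 };
	unsigned short online_cpus = 6;

	/* grant rings up to the limit, capped by the CPU count, and
	 * place them after four base RSS rings already allocated
	 */
	fcoe.indices = min_u16(online_cpus, fcoe.limit);
	fcoe.offset = 4;

	printf("FCoE owns global rings %u..%u\n",
	       (unsigned)fcoe.offset,
	       (unsigned)(fcoe.offset + fcoe.indices - 1));
	return 0;
}
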
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index dee64d2..e7dddfd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -241,7 +241,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
/* Determine 1G link capabilities off of SFP+ type */
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = true;
goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 77ac41f..bb7fde4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3132,7 +3132,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
}
/**
- * ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
* the EEPROM
* @hw: pointer to hardware structure
* @wwnn_prefix: the alternative WWNN prefix
@@ -3325,6 +3325,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
* ixgbe_calculate_checksum - Calculate checksum for buffer
* @buffer: pointer to EEPROM
* @length: size of EEPROM to calculate a checksum for
+ *
* Calculates the checksum for some buffer on a specified length. The
* checksum calculated is returned.
**/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3178f1e..8e1be50 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -154,100 +154,60 @@ static int ixgbe_get_settings(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ ixgbe_link_speed supported_link;
u32 link_speed = 0;
+ bool autoneg;
bool link_up;
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->transceiver = XCVR_EXTERNAL;
- if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.multispeed_fiber)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg);
-
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- ecmd->supported |= SUPPORTED_100baseT_Full;
- break;
- default:
- break;
- }
-
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.autoneg_advertised) {
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- } else {
- /*
- * Default advertised modes in case
- * phy.autoneg_advertised isn't set.
- */
- ecmd->advertising |= (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full);
- if (hw->mac.type == ixgbe_mac_X540)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
-
- if (hw->phy.media_type == ixgbe_media_type_copper) {
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
- } else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
- }
- } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
- /* Set as FIBRE until SERDES defined in kernel */
- if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
- } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
- (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- } else {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- }
+ hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
+
+ /* set the supported link speeds */
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->supported |= SUPPORTED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+
+ /* set the advertised speeds */
+ if (hw->phy.autoneg_advertised) {
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
} else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
+ /* default modes in case phy.autoneg_advertised isn't set */
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
}
- /* Get PHY type */
+ if (autoneg) {
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ } else
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ /* Determine the remaining settings based on the PHY type. */
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
- /* Copper 10G-BASET */
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
break;
case ixgbe_phy_qt:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
@@ -257,42 +217,59 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
- switch (adapter->hw.phy.sfp_type) {
/* SFP+ devices, further checking needed */
+ switch (adapter->hw.phy.sfp_type) {
case ixgbe_sfp_type_da_cu:
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_DA;
break;
case ixgbe_sfp_type_sr:
case ixgbe_sfp_type_lr:
case ixgbe_sfp_type_srlr_core0:
case ixgbe_sfp_type_srlr_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
break;
case ixgbe_sfp_type_not_present:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_NONE;
break;
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- ecmd->supported = SUPPORTED_TP;
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_TP);
+ break;
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
break;
case ixgbe_sfp_type_unknown:
default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_OTHER;
break;
}
break;
case ixgbe_phy_xaui:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_NONE;
break;
case ixgbe_phy_unknown:
case ixgbe_phy_generic:
case ixgbe_phy_sfp_unsupported:
default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_OTHER;
break;
}
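
The rewritten ixgbe_get_settings above builds ecmd->supported and ecmd->advertising from the MAC's reported link-capability bits instead of a per-media-type switch. A hedged, standalone sketch of that bit-translation step; the constants below stand in for the IXGBE_LINK_SPEED_* and ethtool SUPPORTED_* values and are illustrative only:

#include <stdio.h>

/* Illustrative stand-ins for the capability bits returned by
 * get_link_capabilities() and for ethtool's SUPPORTED_* flags.
 */
#define LINK_SPEED_100_FULL	0x0008
#define LINK_SPEED_1GB_FULL	0x0020
#define LINK_SPEED_10GB_FULL	0x0080

#define SUP_100_FULL		(1u << 0)
#define SUP_1000_FULL		(1u << 1)
#define SUP_10000_FULL		(1u << 2)

/* Translate a device capability mask into an ethtool-style mask,
 * mirroring the per-bit tests in the new ixgbe_get_settings().
 */
static unsigned int supported_from_link(unsigned int link)
{
	unsigned int supported = 0;

	if (link & LINK_SPEED_10GB_FULL)
		supported |= SUP_10000_FULL;
	if (link & LINK_SPEED_1GB_FULL)
		supported |= SUP_1000_FULL;
	if (link & LINK_SPEED_100_FULL)
		supported |= SUP_100_FULL;
	return supported;
}

int main(void)
{
	unsigned int link = LINK_SPEED_10GB_FULL | LINK_SPEED_1GB_FULL;

	printf("supported mask: 0x%x\n", supported_from_link(link));
	return 0;
}

The same mask doubles as the default advertising mask when phy.autoneg_advertised is unset, which is exactly the fallback branch in the hunk.
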
@@ -2113,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- int num_vectors;
u16 tx_itr_param, rx_itr_param;
bool need_reset = false;
@@ -2149,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* check the old value and enable RSC if necessary */
need_reset = ixgbe_update_rsc(adapter);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_vectors = 1;
-
- for (i = 0; i < num_vectors; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index bc07933..0922ece 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -38,7 +38,7 @@
/**
* ixgbe_fcoe_clear_ddp - clear the given ddp context
- * @ddp - ptr to the ixgbe_fcoe_ddp
+ * @ddp: ptr to the ixgbe_fcoe_ddp
*
* Returns : none
*
@@ -674,7 +674,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
if (adapter->ring_feature[RING_F_FCOE].indices) {
/* Use multiple rx queues for FCoE by redirection table */
for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
- fcoe_i = f->mask + i % f->indices;
+ fcoe_i = f->offset + i % f->indices;
fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
@@ -683,7 +683,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
} else {
/* Use single rx queue for FCoE */
- fcoe_i = f->mask;
+ fcoe_i = f->offset;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
@@ -691,7 +691,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
}
/* send FIP frames to the first FCoE queue */
- fcoe_i = f->mask;
+ fcoe_i = f->offset;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
IXGBE_ETQS_QUEUE_EN |
@@ -770,7 +770,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
ixgbe_clear_interrupt_scheme(adapter);
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
+ adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
netdev->features |= NETIF_F_FCOE_CRC;
netdev->features |= NETIF_F_FSO;
netdev->features |= NETIF_F_FCOE_MTU;
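
With the FCoE changes above, the feature's offset rather than its mask names the first FCoE rx ring, and the redirection-table entries are spread across the granted rings with a modulo. A hedged sketch of filling such a table; the table size and names below are illustrative stand-ins:

#include <stdio.h>

#define FCRETA_ENTRIES	8	/* illustrative table size */

/* Spread redirection-table entries across the FCoE rx rings,
 * mirroring fcoe_i = f->offset + i % f->indices from the hunk.
 */
static void fill_fcreta(unsigned int *table, unsigned int entries,
			unsigned int offset, unsigned int indices)
{
	unsigned int i;

	for (i = 0; i < entries; i++)
		table[i] = offset + i % indices;
}

int main(void)
{
	unsigned int fcreta[FCRETA_ENTRIES];
	unsigned int i;

	fill_fcreta(fcreta, FCRETA_ENTRIES, 12, 3);
	for (i = 0; i < FCRETA_ENTRIES; i++)
		printf("FCRETA[%u] -> rx ring %u\n", i, fcreta[i]);
	return 0;
}
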
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index af1a531..83eadd0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -138,30 +138,6 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
}
#endif
-/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
-{
- int i;
- bool ret = false;
-
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
- ret = true;
- }
-
- return ret;
-}
-
#ifdef IXGBE_FCOE
/**
* ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
@@ -180,17 +156,14 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
return false;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_cache_ring_fdir(adapter);
- else
- ixgbe_cache_ring_rss(adapter);
+ ixgbe_cache_ring_rss(adapter);
- fcoe_rx_i = f->mask;
- fcoe_tx_i = f->mask;
+ fcoe_rx_i = f->offset;
+ fcoe_tx_i = f->offset;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+ adapter->rx_ring[f->offset + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->offset + i]->reg_idx = fcoe_tx_i;
}
return true;
}
@@ -244,15 +217,12 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
return;
#endif /* IXGBE_FCOE */
- if (ixgbe_cache_ring_fdir(adapter))
- return;
-
if (ixgbe_cache_ring_rss(adapter))
return;
}
/**
- * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * ixgbe_set_sriov_queues - Allocate queues for IOV use
* @adapter: board private structure to initialize
*
* IOV doesn't actually use anything, so just NAK the
@@ -265,72 +235,55 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_set_rss_queues: Allocate queues for RSS
+ * ixgbe_set_rss_queues - Allocate queues for RSS
* @adapter: board private structure to initialize
*
* This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
* to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
*
**/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
- bool ret = false;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
+ struct ixgbe_ring_feature *f;
+ u16 rss_i;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- f->mask = 0xF;
- adapter->num_rx_queues = f->indices;
- adapter->num_tx_queues = f->indices;
- ret = true;
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ return false;
}
- return ret;
-}
+ /* set mask for 16 queue limit of RSS */
+ f = &adapter->ring_feature[RING_F_RSS];
+ rss_i = f->limit;
-/**
- * ixgbe_set_fdir_queues: Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session. This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
- bool ret = false;
- struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
-
- f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
- f_fdir->mask = 0;
+ f->indices = rss_i;
+ f->mask = 0xF;
/*
- * Use RSS in addition to Flow Director to ensure the best
+ * Use Flow Director in addition to RSS to ensure the best
* distribution of flows across cores, even when an FDIR flow
* isn't matched.
*/
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- adapter->num_tx_queues = f_fdir->indices;
- adapter->num_rx_queues = f_fdir->indices;
- ret = true;
- } else {
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ f = &adapter->ring_feature[RING_F_FDIR];
+
+ f->indices = min_t(u16, num_online_cpus(), f->limit);
+ rss_i = max_t(u16, rss_i, f->indices);
}
- return ret;
+
+ adapter->num_rx_queues = rss_i;
+ adapter->num_tx_queues = rss_i;
+
+ return true;
}
#ifdef IXGBE_FCOE
/**
- * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
+ * ixgbe_set_fcoe_queues - Allocate queues for Fiber Channel over Ethernet (FCoE)
* @adapter: board private structure to initialize
*
* FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
- *
+ * Offset is used as the index of the first rx queue used by FCoE.
**/
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
{
@@ -339,21 +292,18 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
- f->indices = min_t(int, num_online_cpus(), f->indices);
+ f->indices = min_t(int, num_online_cpus(), f->limit);
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
e_info(probe, "FCoE enabled with RSS\n");
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_set_fdir_queues(adapter);
- else
- ixgbe_set_rss_queues(adapter);
+ ixgbe_set_rss_queues(adapter);
}
/* adding FCoE rx rings to the end */
- f->mask = adapter->num_rx_queues;
+ f->offset = adapter->num_rx_queues;
adapter->num_rx_queues += f->indices;
adapter->num_tx_queues += f->indices;
@@ -388,7 +338,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
#ifdef IXGBE_FCOE
/* FCoE enabled queues require special configuration indexed
- * by feature specific indices and mask. Here we map FCoE
+ * by feature specific indices and offset. Here we map FCoE
* indices onto the DCB queue pairs allowing FCoE to own
* configuration later.
*/
@@ -401,7 +351,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
tc = prio_tc[adapter->fcoe.up];
f->indices = dev->tc_to_txq[tc].count;
- f->mask = dev->tc_to_txq[tc].offset;
+ f->offset = dev->tc_to_txq[tc].offset;
}
#endif
@@ -410,7 +360,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
#endif
/**
- * ixgbe_set_num_queues: Allocate queues for device, feature dependent
+ * ixgbe_set_num_queues - Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -441,9 +391,6 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
goto done;
#endif /* IXGBE_FCOE */
- if (ixgbe_set_fdir_queues(adapter))
- goto done;
-
if (ixgbe_set_rss_queues(adapter))
goto done;
@@ -507,8 +454,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
* vectors we were allocated.
*/
- adapter->num_msix_vectors = min(vectors,
- adapter->max_msix_q_vectors + NON_Q_VECTORS);
+ vectors -= NON_Q_VECTORS;
+ adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
}
}
@@ -632,9 +579,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
- if ((rxr_idx >= f->mask) &&
- (rxr_idx < f->mask + f->indices))
- set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+ if ((rxr_idx >= f->offset) &&
+ (rxr_idx < f->offset + f->indices))
+ set_bit(__IXGBE_RX_FCOE, &ring->state);
}
#endif /* IXGBE_FCOE */
@@ -695,7 +642,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
**/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int q_vectors = adapter->num_q_vectors;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0;
@@ -739,10 +686,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
return 0;
err_out:
- while (v_idx) {
- v_idx--;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
ixgbe_free_q_vector(adapter, v_idx);
- }
return -ENOMEM;
}
@@ -757,14 +706,13 @@ err_out:
**/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
- int v_idx, q_vectors;
+ int v_idx = adapter->num_q_vectors;
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- q_vectors = 1;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
- for (v_idx = 0; v_idx < q_vectors; v_idx++)
+ while (v_idx--)
ixgbe_free_q_vector(adapter, v_idx);
}
@@ -844,6 +792,8 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
if (err)
return err;
+ adapter->num_q_vectors = 1;
+
err = pci_enable_msi(adapter->pdev);
if (!err) {
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
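
The rewritten ixgbe_set_rss_queues above absorbs the old Flow Director routine: the queue count starts at the RSS limit and, when Flow Director hashing is enabled, grows to the FDIR ring limit capped by the number of online CPUs. A small hedged sketch of that selection, with illustrative names:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Mirror of the queue-count selection in the new ixgbe_set_rss_queues():
 * start from the RSS limit, then let Flow Director grow the count up to
 * min(online CPUs, FDIR limit).
 */
static unsigned int pick_queue_count(unsigned int rss_limit,
				     unsigned int fdir_limit,
				     unsigned int online_cpus,
				     int fdir_enabled)
{
	unsigned int rss_i = rss_limit;

	if (fdir_enabled)
		rss_i = max_u(rss_i, min_u(online_cpus, fdir_limit));
	return rss_i;
}

int main(void)
{
	printf("%u\n", pick_queue_count(16, 64, 24, 1));	/* 24 */
	printf("%u\n", pick_queue_count(16, 64, 8, 1));		/* 16 */
	printf("%u\n", pick_queue_count(16, 64, 24, 0));	/* 16 */
	return 0;
}
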
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bf20457..d3cf887 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -516,7 +516,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
-/*
+/**
* ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -790,12 +790,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
#ifdef CONFIG_IXGBE_PTP
- if (unlikely(tx_buffer->tx_flags &
- IXGBE_TX_FLAGS_TSTAMP))
- ixgbe_ptp_tx_hwtstamp(q_vector,
- tx_buffer->skb);
-
+ if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
+ ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
#endif
+
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -995,7 +993,6 @@ out_no_update:
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
- int num_q_vectors;
int i;
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -1004,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
/* always use CB2 mode, difference is masked in the CB driver */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_q_vectors = 1;
-
- for (i = 0; i < num_q_vectors; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
adapter->q_vector[i]->cpu = -1;
ixgbe_update_dca(adapter->q_vector[i]);
}
@@ -1058,17 +1050,17 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
#ifdef IXGBE_FCOE
/**
* ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
* @rx_desc: advanced rx descriptor
*
* Returns : true if it is FCoE pkt
*/
-static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
- return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1148,7 +1140,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
/* alloc new page for storage */
if (likely(!page)) {
- page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
+ page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
ixgbe_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1390,6 +1382,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ struct net_device *dev = rx_ring->netdev;
+
ixgbe_update_rsc_stats(rx_ring, skb);
ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1397,18 +1391,18 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
#ifdef CONFIG_IXGBE_PTP
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))
- ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
+ ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
#endif
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+ ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
__vlan_hwaccel_put_tag(skb, vid);
}
skb_record_rx_queue(skb, rx_ring->queue_index);
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ skb->protocol = eth_type_trans(skb, dev);
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1546,6 +1540,12 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
skb->truesize -= ixgbe_rx_bufsz(rx_ring);
}
+#ifdef IXGBE_FCOE
+ /* do not attempt to pad FCoE Frames as this will disrupt DDP */
+ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
+ return false;
+
+#endif
/* if skb_pad returns an error the skb was freed */
if (unlikely(skb->len < 60)) {
int pad_len = 60 - skb->len;
@@ -1772,7 +1772,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */
- if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
if (!ddp_bytes) {
dev_kfree_skb_any(skb);
@@ -1825,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector;
- int q_vectors, v_idx;
+ int v_idx;
u32 mask;
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
/* Populate MSIX to EITR Select */
if (adapter->num_vfs > 32) {
u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -1840,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
- for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
struct ixgbe_ring *ring;
q_vector = adapter->q_vector[v_idx];
@@ -2404,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
int vector, err;
int ri = 0, ti = 0;
- for (vector = 0; vector < q_vectors; vector++) {
+ for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
@@ -2563,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int i, q_vectors;
+ int vector;
- q_vectors = adapter->num_msix_vectors;
- i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, adapter);
- i--;
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ free_irq(adapter->pdev->irq, adapter);
+ return;
+ }
- for (; i >= 0; i--) {
- /* free only the irqs that were actually requested */
- if (!adapter->q_vector[i]->rx.ring &&
- !adapter->q_vector[i]->tx.ring)
- continue;
+ for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(adapter->msix_entries[i].vector,
- NULL);
+ /* free only the irqs that were actually requested */
+ if (!q_vector->rx.ring && !q_vector->tx.ring)
+ continue;
- free_irq(adapter->msix_entries[i].vector,
- adapter->q_vector[i]);
- }
- } else {
- free_irq(adapter->pdev->irq, adapter);
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(entry->vector, NULL);
+
+ free_irq(entry->vector, q_vector);
}
+
+ free_irq(adapter->msix_entries[vector++].vector, adapter);
}
/**
@@ -2610,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int i;
- for (i = 0; i < adapter->num_msix_vectors; i++)
- synchronize_irq(adapter->msix_entries[i].vector);
+ int vector;
+
+ for (vector = 0; vector < adapter->num_q_vectors; vector++)
+ synchronize_irq(adapter->msix_entries[vector].vector);
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
@@ -2849,40 +2847,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
u8 reg_idx = rx_ring->reg_idx;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB: {
- struct ixgbe_ring_feature *feature = adapter->ring_feature;
- const int mask = feature[RING_F_RSS].mask;
- reg_idx = reg_idx & mask;
- }
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- default:
- break;
- }
-
- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ u16 mask = adapter->ring_feature[RING_F_RSS].mask;
- srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
- srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
- if (adapter->num_vfs)
- srrctl |= IXGBE_SRRCTL_DROP_EN;
+ /*
+ * if VMDq is not active we must program one srrctl register
+ * per RSS queue since we have enabled RDRXCTL.MVMEN
+ */
+ reg_idx &= mask;
+ }
- srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK;
+ /* configure header buffer length, needed for RSC */
+ srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ /* configure the packet buffer length */
#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
+
+ /* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
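
The SRRCTL hunk above drops the read-modify-write and composes the register from scratch: the header buffer size shifted into its field, the packet buffer size scaled into its field, and the one-buffer advanced descriptor type. A hedged sketch of composing such a value; the shift amounts and macro values below approximate the layout in the hunk and are illustrative, not authoritative:

#include <stdio.h>

/* Illustrative stand-ins for the SRRCTL field positions used above. */
#define SRRCTL_BSIZEHDRSIZE_SHIFT	2
#define SRRCTL_BSIZEPKT_SHIFT		10	/* bytes -> 1KB units */
#define SRRCTL_DESCTYPE_ADV_ONEBUF	0x02000000

static unsigned int srrctl_compose(unsigned int hdr_bytes,
				   unsigned int pkt_bytes)
{
	unsigned int srrctl;

	/* header buffer length (needed for RSC) */
	srrctl = hdr_bytes << SRRCTL_BSIZEHDRSIZE_SHIFT;

	/* packet buffer length, scaled down into its field */
	srrctl |= pkt_bytes >> SRRCTL_BSIZEPKT_SHIFT;

	/* one-buffer advanced descriptor format */
	srrctl |= SRRCTL_DESCTYPE_ADV_ONEBUF;
	return srrctl;
}

int main(void)
{
	printf("SRRCTL = 0x%08x\n", srrctl_compose(256, 2048));
	return 0;
}
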
@@ -3555,37 +3547,21 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- struct ixgbe_q_vector *q_vector;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = adapter->q_vector[q_idx];
- napi_enable(&q_vector->napi);
- }
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+ napi_enable(&adapter->q_vector[q_idx]->napi);
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- struct ixgbe_q_vector *q_vector;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
-
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = adapter->q_vector[q_idx];
- napi_disable(&q_vector->napi);
- }
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+ napi_disable(&adapter->q_vector[q_idx]->napi);
}
#ifdef CONFIG_IXGBE_DCB
-/*
+/**
* ixgbe_configure_dcb - Configure DCB hardware
* @adapter: ixgbe adapter struct
*
@@ -3607,10 +3583,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 32768);
-
- /* Enable VLAN tag insert/strip */
- adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
-
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
#ifdef IXGBE_FCOE
@@ -3656,11 +3628,11 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20
-/*
+/**
* ixgbe_hpbthresh - calculate high water mark for flow control
*
* @adapter: board private structure to calculate for
- * @pb - packet buffer to calculate
+ * @pb: packet buffer to calculate
*/
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
@@ -3720,11 +3692,11 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
return marker;
}
-/*
+/**
* ixgbe_lpbthresh - calculate low water mark for flow control
*
* @adapter: board private structure to calculate for
- * @pb - packet buffer to calculate
+ * @pb: packet buffer to calculate
*/
static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
{
@@ -4408,18 +4380,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* Set capability flags */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
- adapter->ring_feature[RING_F_RSS].indices = rss;
+ adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
- adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82598;
break;
case ixgbe_mac_X540:
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
case ixgbe_mac_82599EB:
- adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
@@ -4427,13 +4399,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* Flow Director hash filters enabled */
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
- adapter->ring_feature[RING_F_FDIR].indices =
+ adapter->ring_feature[RING_F_FDIR].limit =
IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
@@ -5241,7 +5212,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/**
* ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
@@ -5277,7 +5248,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
*
* This function serves two purposes. First it strobes the interrupt lines
* in order to make certain interrupts are occurring. Secondly it sets the
@@ -5311,7 +5282,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
} else {
/* get one bit for every active tx/rx interrupt vector */
- for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
eics |= ((u64)1 << i);
@@ -5325,8 +5296,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_update_link - update the link status
- * @adapter - pointer to the device adapter structure
- * @link_speed - pointer to a u32 to store the link_speed
+ * @adapter: pointer to the device adapter structure
+ * @link_speed: pointer to a u32 to store the link_speed
**/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
@@ -5369,7 +5340,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
@@ -5429,7 +5400,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_link_is_down - update netif_carrier status and
* print link down message
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
**/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
@@ -5457,7 +5428,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_flush_tx - flush queues on link down
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
@@ -5506,7 +5477,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_subtask - check and bring link up
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
@@ -5530,7 +5501,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_sfp_detection_subtask - poll for SFP+ cable
- * @adapter - the ixgbe adapter structure
+ * @adapter: the ixgbe adapter structure
**/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
@@ -5597,7 +5568,7 @@ sfp_out:
/**
* ixgbe_sfp_link_config_subtask - set up link SFP after module install
- * @adapter - the ixgbe adapter structure
+ * @adapter: the ixgbe adapter structure
**/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
@@ -6228,8 +6199,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
if (((protocol == htons(ETH_P_FCOE)) ||
(protocol == htons(ETH_P_FIP))) &&
(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
- txq += adapter->ring_feature[RING_F_FCOE].mask;
+ struct ixgbe_ring_feature *f;
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ while (txq >= f->indices)
+ txq -= f->indices;
+ txq += adapter->ring_feature[RING_F_FCOE].offset;
+
return txq;
}
#endif
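
The select_queue change above replaces masking with an explicit fold: the hashed queue index is reduced into the FCoE ring count by repeated subtraction and then rebased at the feature's offset, which also works when the ring count is not a power of two. A standalone hedged sketch with illustrative names:

#include <stdio.h>

/* Fold an arbitrary hashed queue index into a block of 'indices'
 * FCoE rings starting at 'offset', as in the hunk above.
 */
static unsigned int fcoe_select_queue(unsigned int txq,
				      unsigned int indices,
				      unsigned int offset)
{
	while (txq >= indices)
		txq -= indices;
	return txq + offset;
}

int main(void)
{
	unsigned int hash;

	for (hash = 0; hash < 10; hash++)
		printf("hash %u -> queue %u\n", hash,
		       fcoe_select_queue(hash, 6, 16));
	return 0;
}
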
@@ -6384,17 +6361,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/*
* The minimum packet size for olinfo paylen is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
}
@@ -6528,11 +6500,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- for (i = 0; i < num_q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- ixgbe_msix_clean_rings(0, q_vector);
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
} else {
ixgbe_intr(adapter->pdev->irq, netdev);
}
@@ -6589,8 +6558,9 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
}
#ifdef CONFIG_IXGBE_DCB
-/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
- * #adapter: pointer to ixgbe_adapter
+/**
+ * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
* @tc: number of traffic classes currently enabled
*
* Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
@@ -6625,8 +6595,8 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
return;
}
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
- * classes.
+/**
+ * ixgbe_setup_tc - configure net_device for multiple traffic classes
*
* @netdev: net device to configure
* @tc: number of traffic classes to enable
@@ -6642,6 +6612,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return -EINVAL;
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ e_err(drv, "Enable failed, SR-IOV enabled\n");
+ return -EINVAL;
+ }
+
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
(hw->mac.type == ixgbe_mac_82598EB &&
@@ -6701,11 +6676,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- features &= ~NETIF_F_HW_VLAN_RX;
-#endif
-
/* return error if RXHASH is being enabled when RSS is not supported */
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6688,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
features &= ~NETIF_F_LRO;
-
return features;
}
@@ -6766,6 +6735,11 @@ static int ixgbe_set_features(struct net_device *netdev,
need_reset = true;
}
+ if (features & NETIF_F_HW_VLAN_RX)
+ ixgbe_vlan_strip_enable(adapter);
+ else
+ ixgbe_vlan_strip_disable(adapter);
+
if (changed & NETIF_F_RXALL)
need_reset = true;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 2411770..71659ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -907,6 +907,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 8 SFP_act_lmt_DA_CORE1 - 82599-specific
* 9 SFP_1g_cu_CORE0 - 82599-specific
* 10 SFP_1g_cu_CORE1 - 82599-specific
+ * 11 SFP_1g_sx_CORE0 - 82599-specific
+ * 12 SFP_1g_sx_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -957,6 +959,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_cu_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1049,7 +1058,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Verify supported 1G SFP modules */
if (comp_codes_10g == 0 &&
!(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -1064,7 +1075,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->mac.ops.get_device_caps(hw, &enforce_sfp);
if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
!((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = 0;
@@ -1128,10 +1141,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* SR modules
*/
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core0)
+ sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core1)
+ sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ddc6a4d..3456d56 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,6 +26,7 @@
*******************************************************************************/
#include "ixgbe.h"
#include <linux/export.h>
+#include <linux/ptp_classify.h>
/*
* The 82599 and the X540 do not have true 64bit nanosecond scale
@@ -100,9 +101,13 @@
#define NSECS_PER_SEC 1000000000ULL
#endif
+static struct sock_filter ptp_filter[] = {
+ PTP_FILTER
+};
+
/**
* ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
- * @cc - the cyclecounter structure
+ * @cc: the cyclecounter structure
*
* this function reads the cyclecounter registers and is called by the
* cyclecounter structure used to construct a ns counter from the
@@ -123,8 +128,8 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
/**
* ixgbe_ptp_adjfreq
- * @ptp - the ptp clock structure
- * @ppb - parts per billion adjustment from base
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
*
* adjust the frequency of the ptp cycle counter by the
* indicated ppb from the base frequency.
@@ -170,8 +175,8 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
/**
* ixgbe_ptp_adjtime
- * @ptp - the ptp clock structure
- * @delta - offset to adjust the cycle counter by
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
*
* adjust the timer by resetting the timecounter structure.
*/
@@ -198,8 +203,8 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
/**
* ixgbe_ptp_gettime
- * @ptp - the ptp clock structure
- * @ts - timespec structure to hold the current time value
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
*
* read the timecounter and return the correct value on ns,
* after converting it into a struct timespec.
@@ -224,8 +229,8 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
/**
* ixgbe_ptp_settime
- * @ptp - the ptp clock structure
- * @ts - the timespec containing the new time for the cycle counter
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
*
* reset the timecounter to use a new base value instead of the kernel
* wall timer value.
@@ -251,9 +256,9 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
/**
* ixgbe_ptp_enable
- * @ptp - the ptp clock structure
- * @rq - the requested feature to change
- * @on - whether to enable or disable the feature
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
*
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
@@ -289,8 +294,8 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
/**
* ixgbe_ptp_check_pps_event
- * @adapter - the private adapter structure
- * @eicr - the interrupt cause register value
+ * @adapter: the private adapter structure
+ * @eicr: the interrupt cause register value
*
* This function is called by the interrupt routine when checking for
* interrupts. It will check and handle a pps event.
@@ -307,20 +312,21 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
return;
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_TIMESYNC)
+ if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
ptp_clock_event(adapter->ptp_clock, &event);
- break;
- default:
- break;
+ break;
+ default:
+ break;
+ }
}
}
/**
* ixgbe_ptp_enable_sdp
- * @hw - the hardware private structure
- * @shift - the clock shift for calculating nanoseconds
+ * @hw: the hardware private structure
+ * @shift: the clock shift for calculating nanoseconds
*
* this function enables the clock out feature on the sdp0 for the
* X540 device. It will create a 1second periodic output that can be
@@ -393,7 +399,7 @@ static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
/**
* ixgbe_ptp_disable_sdp
- * @hw - the private hardware structure
+ * @hw: the private hardware structure
*
* this function disables the auxiliary SDP clock out feature
*/
@@ -425,6 +431,68 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_match - determine if this skb matches a ptp packet
+ * @skb: pointer to the skb
+ * @rx_filter: the rx filter value from the configured hwtstamp_config
+ *
+ * Determine whether the skb should have been timestamped, assuming the
+ * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
+ * should have a timestamp waiting in the registers, and 0 otherwise.
+ *
+ * V1 packets have to check the version type to determine whether they are
+ * correct. However, we can't directly access the data because it might be
+ * fragmented in the SKB, in paged memory. In order to work around this, we
+ * use skb_copy_bits which will properly copy the data whether it is in the
+ * paged memory fragments or not. We have to copy the IP header as well as the
+ * message type.
+ */
+static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
+{
+ struct iphdr iph;
+ u8 msgtype;
+ unsigned int type, offset;
+
+ if (rx_filter == HWTSTAMP_FILTER_NONE)
+ return 0;
+
+ type = sk_run_filter(skb, ptp_filter);
+
+ if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
+ return type & PTP_CLASS_V2;
+
+ /* For the remaining cases actually check message type */
+ switch (type) {
+ case PTP_CLASS_V1_IPV4:
+ skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
+ offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
+ break;
+ case PTP_CLASS_V1_IPV6:
+ offset = OFF_PTP6 + OFF_PTP_CONTROL;
+ break;
+ default:
+ /* other cases invalid or handled above */
+ return 0;
+ }
+
+ /* Make sure our buffer is long enough */
+ if (skb->len < offset)
+ return 0;
+
+ skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
+ break;
+ default:
+ return 0;
+ }
+}
+
+/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: structure containing interrupt and ring information
* @skb: particular skb to send timestamp with
@@ -473,6 +541,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
/**
* ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @q_vector: structure containing interrupt and ring information
+ * @rx_desc: the rx descriptor
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
@@ -480,6 +549,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
* is passed up the network stack
*/
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
@@ -497,21 +567,33 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
hw = &adapter->hw;
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+
+ /* Check if we have a valid timestamp and make sure the skb should
+ * have been timestamped */
+ if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
+ !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ return;
+
+ /*
+ * Always read the registers, in order to clear a possible fault
+ * because of stagnant RX timestamp values for a packet that never
+ * reached the queue.
+ */
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
/*
- * If this bit is set, then the RX registers contain the time stamp. No
- * other packet will be time stamped until we read these registers, so
- * read the registers to make them available again. Because only one
- * packet can be time stamped at a time, we know that the register
- * values must belong to this one here and therefore we don't need to
- * compare any of the additional attributes stored for it.
+ * If the timestamp bit is set in the packet's descriptor, we know the
+ * timestamp belongs to this packet. No other packet can be
+ * timestamped until the registers for timestamping have been read.
+ * Therefore only one packet with this bit can be in the queue at a
+ * time, and the rx timestamp values that were in the registers belong
+ * to this packet.
*
* If nothing went wrong, then it should have a skb_shared_tx that we
* can turn into a skb_shared_hwtstamps.
*/
- if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
+ if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
return;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -539,6 +621,11 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
+ *
+ * Since hardware always timestamps Path delay packets when timestamping V2
+ * packets, regardless of the type specified in the register, only V2 Event
+ * mode is used. This more accurately tells the user what the hardware is
+ * actually going to do.
*/
int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd)
@@ -582,41 +669,30 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
default:
/*
- * register RXMTRL must be set, therefore it is not
- * possible to time stamp both V1 Sync and Delay_Req messages
- * and hardware does not support timestamping all packets
- * => return error
+ * The RXMTRL register must be set in order to timestamp V1
+ * packets, therefore it is not possible to time stamp both V1
+ * Sync and Delay_Req messages, and hardware does not support
+ * timestamping all packets => return error
*/
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -626,6 +702,9 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
return 0;
}
+ /* Store filter value for later use */
+ adapter->rx_hwtstamp_filter = config.rx_filter;
+
/* define ethertype filter for timestamped packets */
if (is_l2)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
@@ -690,7 +769,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
/**
* ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
*
* this function initializes the timecounter and cyclecounter
* structures for use in generated a ns counter from the arbitrary
@@ -708,6 +787,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 incval = 0;
+ u32 timinca = 0;
u32 shift = 0;
u32 cycle_speed;
unsigned long flags;
@@ -730,8 +810,16 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
break;
}
- /* Bail if the cycle speed didn't change */
- if (adapter->cycle_speed == cycle_speed)
+ /*
+ * Grab the current TIMINCA value from the register so that it can be
+ * double-checked. If the register value has been cleared, it must be
+ * reset to the correct value for generating a cyclecounter. If
+ * TIMINCA is zero, the SYSTIME registers do not increment at all.
+ */
+ timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
+
+ /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
+ if (adapter->cycle_speed == cycle_speed && timinca)
return;
/* disable the SDP clock out */
@@ -817,7 +905,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
/**
* ixgbe_ptp_init
- * @adapter - the ixgbe private adapter structure
+ * @adapter: the ixgbe private adapter structure
*
* This function performs the required steps for enabling ptp
* support. If ptp support has already been loaded it simply calls the
@@ -861,6 +949,10 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
return;
}
+ /* initialize the ptp filter */
+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
+ e_dev_warn("ptp_filter_init failed\n");
+
spin_lock_init(&adapter->tmreg_lock);
ixgbe_ptp_start_cyclecounter(adapter);
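
The filter matching added above reduces to two steps: classify the packet with the BPF-based ptp_filter, then read the PTP control byte at a computed offset and compare it against the message type the user asked for. Below is a minimal standalone sketch of that offset arithmetic for a V1 IPv4/UDP packet; it is illustration only, and the constant values are hypothetical stand-ins for the kernel's ETH_HLEN, UDP_HLEN and OFF_PTP_CONTROL definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel constants used by the driver. */
#define ETH_HLEN        14
#define UDP_HLEN        8
#define OFF_PTP_CONTROL 32      /* control field offset inside the PTPv1 header */

/* Offset of the V1 control byte for an IPv4/UDP PTP event packet,
 * given the IP header length field (ihl, in 32-bit words). */
static unsigned int ptp_v1_ipv4_control_offset(uint8_t ihl)
{
        return ETH_HLEN + (ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
}

int main(void)
{
        /* A typical IPv4 header without options has ihl == 5 (20 bytes). */
        printf("control byte offset: %u\n", ptp_v1_ipv4_control_offset(5));
        return 0;
}

With a standard 20-byte IP header this is the same offset the driver computes before calling skb_copy_bits() to fetch the message type.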
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 1d80b1c..16ddf14 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -37,7 +37,6 @@
#include <linux/netdevice.h>
#include <linux/hwmon.h>
-#ifdef CONFIG_IXGBE_HWMON
/* hwmon callback functions */
static ssize_t ixgbe_hwmon_show_location(struct device *dev,
struct device_attribute *attr,
@@ -96,11 +95,11 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
return sprintf(buf, "%u\n", value);
}
-/*
+/**
* ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
- * @ adapter: pointer to the adapter structure
- * @ offset: offset in the eeprom sensor data table
- * @ type: type of sensor data to display
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
*
* For each file we want in hwmon's sysfs interface we need a device_attribute
* This is included in our hwmon_attr struct that contains the references to
@@ -241,5 +240,4 @@ err:
exit:
return rc;
}
-#endif /* CONFIG_IXGBE_HWMON */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 204848d..7416d22 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2419,7 +2419,7 @@ typedef u32 ixgbe_physical_layer;
*/
/* BitTimes (BT) conversion */
-#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
+#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
#define IXGBE_B2BT(BT) (BT * 8)
/* Calculate Delay to respond to PFC */
@@ -2450,24 +2450,31 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PCI_DELAY 10000
/* Calculate X540 delay value in bit times */
-#define IXGBE_FILL_RATE (36 / 25)
-
-#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + \
- (2 * IXGBE_ID_X540) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate 82599, 82598 delay value in bit times */
-#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate low threshold delay values */
-#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
- (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
-#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC))
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+ (2 * IXGBE_B2BT(_max_frame_tc) + \
+ (36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+ (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
@@ -2597,6 +2604,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_1g_cu_core0 = 9,
ixgbe_sfp_type_1g_cu_core1 = 10,
+ ixgbe_sfp_type_1g_sx_core0 = 11,
+ ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
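
Two integer-arithmetic pitfalls are fixed in the delay macros above: IXGBE_BT2KB() previously added 1023 before dividing by 8 * 1024, so it rounded up to the next 1024 bits rather than the next kilobyte, and the old IXGBE_FILL_RATE of (36 / 25) truncated to 1, silently dropping the intended 1.44 scaling factor. A small standalone sketch of both effects, using an arbitrary sample value rather than anything taken from the datasheet:

#include <stdio.h>

/* Illustration only: shows why the old macros mis-rounded. */
int main(void)
{
        unsigned int bt = 10000;                /* some delay in bit times */

        /* Old: (36 / 25) == 1 in integer math, so the 1.44 fill-rate
         * factor silently became 1.0.  New: multiply first, divide last. */
        unsigned int old_scaled = (36 / 25) * bt;
        unsigned int new_scaled = 36 * bt / 25;

        /* Old: adding 1023 only rounds up to the next 1024 bits, not the
         * next kilobyte (8192 bits).  New: add (8 * 1024 - 1) first. */
        unsigned int old_kb = (bt + 1023) / (8 * 1024);
        unsigned int new_kb = (bt + (8 * 1024 - 1)) / (8 * 1024);

        printf("scaled: old=%u new=%u\n", old_scaled, new_scaled);
        printf("KB:     old=%u new=%u\n", old_kb, new_kb);
        return 0;
}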
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index f69ec42..0368160 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -115,7 +115,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
-/*
+/**
* ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -1942,8 +1942,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
}
}
-/*
- * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 5dc9cbd..9fa39eb 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -149,7 +149,6 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
spin_unlock_irqrestore(&priv->lock, flags);
skb_put(skb, len);
- skb->dev = ch->netdev;
skb->protocol = eth_type_trans(skb, ch->netdev);
netif_receive_skb(skb);
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 04d901d..770ee55 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -436,7 +436,9 @@ struct mv643xx_eth_private {
/*
* Hardware-specific parameters.
*/
+#if defined(CONFIG_HAVE_CLK)
struct clk *clk;
+#endif
unsigned int t_clk;
};
@@ -1894,7 +1896,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
goto out_free;
}
- rx_desc = (struct rx_desc *)rxq->rx_desc_area;
+ rx_desc = rxq->rx_desc_area;
for (i = 0; i < rxq->rx_ring_size; i++) {
int nexti;
@@ -1999,7 +2001,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
txq->tx_desc_area_size = size;
- tx_desc = (struct tx_desc *)txq->tx_desc_area;
+ tx_desc = txq->tx_desc_area;
for (i = 0; i < txq->tx_ring_size; i++) {
struct tx_desc *txd = tx_desc + i;
int nexti;
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->dev = dev;
/*
- * Get the clk rate, if there is one, otherwise use the default.
+ * Start with a default rate, and if there is a clock, allow
+ * it to override the default.
*/
+ mp->t_clk = 133000000;
+#if defined(CONFIG_HAVE_CLK)
mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
if (!IS_ERR(mp->clk)) {
clk_prepare_enable(mp->clk);
mp->t_clk = clk_get_rate(mp->clk);
- } else {
- mp->t_clk = 133000000;
- printk(KERN_WARNING "Unable to get clock");
}
-
+#endif
set_params(mp, pd);
netif_set_real_num_tx_queues(dev, mp->txq_count);
netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
phy_detach(mp->phy);
cancel_work_sync(&mp->tx_timeout_task);
+#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(mp->clk)) {
clk_disable_unprepare(mp->clk);
clk_put(mp->clk);
}
+#endif
+
free_netdev(mp->dev);
platform_set_drvdata(pdev, NULL);
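
The clock handling above follows a default-then-override pattern: t_clk starts at 133 MHz and is only replaced if a clock is actually provided, with the whole lookup compiled out when CONFIG_HAVE_CLK is not set. A hedged sketch of the same pattern in isolation; get_optional_clock_rate() is a made-up stand-in for clk_get()/clk_get_rate():

#include <stdio.h>

/* Pretend no clock is provided on this platform (returns 0). */
static unsigned long get_optional_clock_rate(void)
{
        return 0;
}

int main(void)
{
        unsigned long t_clk = 133000000;        /* safe default from the patch */
        unsigned long rate = get_optional_clock_rate();

        if (rate)
                t_clk = rate;                   /* clock present: override */

        printf("t_clk = %lu Hz\n", t_clk);
        return 0;
}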
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1db023b..5948972 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1032,7 +1032,7 @@ static int rxq_init(struct net_device *dev)
}
memset((void *)pep->p_rx_desc_area, 0, size);
/* initialize the next_desc_ptr links in the Rx descriptors ring */
- p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ p_rx_desc = pep->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) {
p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
@@ -1095,7 +1095,7 @@ static int txq_init(struct net_device *dev)
}
memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
- p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ p_tx_desc = pep->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) {
p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f..2b0748d 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -141,6 +141,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
{ 0 }
};
@@ -3079,8 +3080,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id)
/* Reading this mask interrupts as side effect */
status = sky2_read32(hw, B0_Y2_SP_ISRC2);
- if (status == 0 || status == ~0)
+ if (status == 0 || status == ~0) {
+ sky2_write32(hw, B0_Y2_SP_ICR, 2);
return IRQ_NONE;
+ }
prefetch(&hw->st_le[hw->st_idx]);
@@ -3349,6 +3352,17 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
reg);
+ if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+ hw->chip_rev == CHIP_REV_YU_PRM_A0) {
+ /* change PHY Interrupt polarity to low active */
+ reg = sky2_read16(hw, GPHY_CTRL);
+ sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);
+
+ /* adapt HW for low active PHY Interrupt */
+ reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
+ sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
+ }
+
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
@@ -4381,10 +4395,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
struct sky2_port *sky2 = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
- if (changed & NETIF_F_RXCSUM) {
- bool on = features & NETIF_F_RXCSUM;
- sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
- on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+ if ((changed & NETIF_F_RXCSUM) &&
+ !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+ sky2_write32(sky2->hw,
+ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ (features & NETIF_F_RXCSUM)
+ ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
if (changed & NETIF_F_RXHASH)
@@ -4869,7 +4885,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
"UL 2", /* 0xba */
"Unknown", /* 0xbb */
"Optima", /* 0xbc */
- "Optima Prime", /* 0xbd */
+ "OptimaEEE", /* 0xbd */
"Optima 2", /* 0xbe */
};
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 3c896ce..615ac63 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -23,6 +23,7 @@ enum {
PSM_CONFIG_REG3 = 0x164,
PSM_CONFIG_REG4 = 0x168,
+ PCI_LDO_CTRL = 0xbc,
};
/* Yukon-2 */
@@ -586,6 +587,10 @@ enum yukon_supr_rev {
CHIP_REV_YU_SU_B1 = 3,
};
+enum yukon_prm_rev {
+ CHIP_REV_YU_PRM_Z1 = 1,
+ CHIP_REV_YU_PRM_A0 = 2,
+};
/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 842c8ce..7e94987d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1080,6 +1080,25 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = NULL
},
+ /* flow steering commands */
+ {
+ .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
+ },
+ {
+ .opcode = MLX4_QP_FLOW_STEERING_DETACH,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
+ },
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
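
The two command table entries above pair an attach command that reports its result through the command's immediate output (out_is_imm = true) with a detach command that consumes that value. A toy model of that registration-id round trip, with fw_attach()/fw_detach() as made-up stand-ins for the real mlx4_cmd_imm()/mlx4_cmd() calls:

#include <stdint.h>
#include <stdio.h>

static uint64_t next_reg_id = 0x1000;

/* Attach: firmware hands back an opaque registration id. */
static int fw_attach(uint64_t *reg_id)
{
        *reg_id = next_reg_id++;
        return 0;
}

/* Detach: the same id is passed back to remove the rule. */
static int fw_detach(uint64_t reg_id)
{
        printf("detaching rule %#llx\n", (unsigned long long)reg_id);
        return 0;
}

int main(void)
{
        uint64_t id;

        if (!fw_attach(&id))
                fw_detach(id);
        return 0;
}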
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 72901ce..dd6a77b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -38,6 +38,10 @@
#include "mlx4_en.h"
#include "en_port.h"
+#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
+#define EN_ETHTOOL_MAC_MASK 0xffffffffffffULL
+#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
+#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
@@ -599,16 +603,369 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
return err;
}
+#define all_zeros_or_all_ones(field) \
+ ((field) == 0 || (field) == (__force typeof(field))-1)
+
+static int mlx4_en_validate_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethhdr *eth_mask;
+ u64 full_mac = ~0ull;
+ u64 zero_mac = 0;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ if (cmd->fs.m_u.tcp_ip4_spec.tos)
+ return -EINVAL;
+ l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
+ /* don't allow masks which aren't all 0s or all 1s */
+ if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
+ !all_zeros_or_all_ones(l4_mask->ip4dst) ||
+ !all_zeros_or_all_ones(l4_mask->psrc) ||
+ !all_zeros_or_all_ones(l4_mask->pdst))
+ return -EINVAL;
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &cmd->fs.m_u.usr_ip4_spec;
+ if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
+ cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
+ (!l3_mask->ip4src && !l3_mask->ip4dst) ||
+ !all_zeros_or_all_ones(l3_mask->ip4src) ||
+ !all_zeros_or_all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ break;
+ case ETHER_FLOW:
+ eth_mask = &cmd->fs.m_u.ether_spec;
+ /* source mac mask must not be set */
+ if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN))
+ return -EINVAL;
+
+ /* dest mac mask must be ff:ff:ff:ff:ff:ff */
+ if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN))
+ return -EINVAL;
+
+ if (!all_zeros_or_all_ones(eth_mask->h_proto))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((cmd->fs.flow_type & FLOW_EXT)) {
+ if (cmd->fs.m_ext.vlan_etype ||
+ !(cmd->fs.m_ext.vlan_tci == 0 ||
+ cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int add_ip_rule(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h)
+{
+ struct mlx4_spec_list *spec_l3;
+ struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
+
+ spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
+ if (!spec_l3) {
+ en_err(priv, "Fail to alloc ethtool rule.\n");
+ return -ENOMEM;
+ }
+
+ spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
+ if (l3_mask->ip4src)
+ spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
+ if (l3_mask->ip4dst)
+ spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
+ list_add_tail(&spec_l3->list, list_h);
+
+ return 0;
+}
+
+static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h, int proto)
+{
+ struct mlx4_spec_list *spec_l3;
+ struct mlx4_spec_list *spec_l4;
+ struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
+
+ spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
+ spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
+ if (!spec_l4 || !spec_l3) {
+ en_err(priv, "Fail to alloc ethtool rule.\n");
+ kfree(spec_l3);
+ kfree(spec_l4);
+ return -ENOMEM;
+ }
+
+ spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
+
+ if (proto == TCP_V4_FLOW) {
+ spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
+ spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
+ spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
+ } else {
+ spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
+ spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
+ spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
+ }
+
+ if (l4_mask->ip4src)
+ spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
+ if (l4_mask->ip4dst)
+ spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
+
+ if (l4_mask->psrc)
+ spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
+ if (l4_mask->pdst)
+ spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;
+
+ list_add_tail(&spec_l3->list, list_h);
+ list_add_tail(&spec_l4->list, list_h);
+
+ return 0;
+}
+
+static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h)
+{
+ int err;
+ u64 mac;
+ __be64 be_mac;
+ struct ethhdr *eth_spec;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_spec_list *spec_l2;
+ __be64 mac_msk = cpu_to_be64(EN_ETHTOOL_MAC_MASK << 16);
+
+ err = mlx4_en_validate_flow(dev, cmd);
+ if (err)
+ return err;
+
+ spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
+ if (!spec_l2)
+ return -ENOMEM;
+
+ mac = priv->mac & EN_ETHTOOL_MAC_MASK;
+ be_mac = cpu_to_be64(mac << 16);
+
+ spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
+ if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
+
+ if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
+ spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
+ spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
+ }
+
+ list_add_tail(&spec_l2->list, rule_list_h);
+
+ switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ eth_spec = &cmd->fs.h_u.ether_spec;
+ memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
+ spec_l2->eth.ether_type = eth_spec->h_proto;
+ if (eth_spec->h_proto)
+ spec_l2->eth.ether_type_enable = 1;
+ break;
+ case IP_USER_FLOW:
+ err = add_ip_rule(priv, cmd, rule_list_h);
+ break;
+ case TCP_V4_FLOW:
+ err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
+ break;
+ case UDP_V4_FLOW:
+ err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
+ break;
+ }
+
+ return err;
+}
+
+static int mlx4_en_flow_replace(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct ethtool_flow_id *loc_rule;
+ struct mlx4_spec_list *spec, *tmp_spec;
+ u32 qpn;
+ u64 reg_id;
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ };
+
+ rule.port = priv->port;
+ rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
+ INIT_LIST_HEAD(&rule.list);
+
+ /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
+ if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
+ qpn = priv->drop_qp.qpn;
+ else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
+ qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
+ } else {
+ if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
+ en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+ qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
+ if (!qpn) {
+ en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+ }
+ rule.qpn = qpn;
+ err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
+ if (err)
+ goto out_free_list;
+
+ loc_rule = &priv->ethtool_rules[cmd->fs.location];
+ if (loc_rule->id) {
+ err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
+ if (err) {
+ en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
+ cmd->fs.location, loc_rule->id);
+ goto out_free_list;
+ }
+ loc_rule->id = 0;
+ memset(&loc_rule->flow_spec, 0,
+ sizeof(struct ethtool_rx_flow_spec));
+ }
+ err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
+ if (err) {
+ en_err(priv, "Fail to attach network rule at location %d.\n",
+ cmd->fs.location);
+ goto out_free_list;
+ }
+ loc_rule->id = reg_id;
+ memcpy(&loc_rule->flow_spec, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+out_free_list:
+ list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
+ list_del(&spec->list);
+ kfree(spec);
+ }
+ return err;
+}
+
+static int mlx4_en_flow_detach(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct ethtool_flow_id *rule;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->ethtool_rules[cmd->fs.location];
+ if (!rule->id) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ err = mlx4_flow_detach(priv->mdev->dev, rule->id);
+ if (err) {
+ en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
+ cmd->fs.location, rule->id);
+ goto out;
+ }
+ rule->id = 0;
+ memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
+out:
+ return err;
+
+}
+
+static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ int loc)
+{
+ int err = 0;
+ struct ethtool_flow_id *rule;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->ethtool_rules[loc];
+ if (rule->id)
+ memcpy(&cmd->fs, &rule->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ else
+ err = -ENOENT;
+
+ return err;
+}
+
+static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
+{
+
+ int i, res = 0;
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ if (priv->ethtool_rules[i].id)
+ res++;
+ }
+ return res;
+
+}
+
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
+ int i = 0, priority = 0;
+
+ if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
+ cmd->cmd == ETHTOOL_GRXCLSRULE ||
+ cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
+ mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EINVAL;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = priv->rx_ring_num;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = mlx4_en_get_num_flows(priv);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
+ err = mlx4_en_get_flow(dev, cmd, i);
+ if (!err)
+ rule_locs[priority++] = i;
+ i++;
+ }
+ err = 0;
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -617,6 +974,30 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
+static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EINVAL;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx4_en_flow_replace(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx4_en_flow_detach(dev, cmd);
+ break;
+ default:
+ en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -637,6 +1018,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
+ .set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_indir = mlx4_en_get_rxfh_indir,
.set_rxfh_indir = mlx4_en_set_rxfh_indir,
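
mlx4_en_validate_flow() above relies on the fact that the hardware can either match a field exactly or ignore it, so every mask field must be all zeros or all ones; partial masks are rejected with -EINVAL. A standalone sketch of that check applied to a simplified stand-in for ethtool_tcpip4_spec (the __force annotation from the kernel macro is dropped here):

#include <stdint.h>
#include <stdio.h>

#define all_zeros_or_all_ones(field) \
        ((field) == 0 || (field) == (typeof(field))-1)

/* Simplified stand-in for the ip4src/psrc mask fields. */
struct sample_mask {
        uint32_t ip4src;
        uint16_t psrc;
};

static int mask_is_valid(const struct sample_mask *m)
{
        return all_zeros_or_all_ones(m->ip4src) &&
               all_zeros_or_all_ones(m->psrc);
}

int main(void)
{
        struct sample_mask exact = { 0xffffffff, 0xffff };   /* match exactly */
        struct sample_mask partial = { 0xffffff00, 0 };      /* partial: rejected */

        printf("exact:   %s\n", mask_is_valid(&exact) ? "ok" : "-EINVAL");
        printf("partial: %s\n", mask_is_valid(&partial) ? "ok" : "-EINVAL");
        return 0;
}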
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 926d8aa..94375a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -170,33 +170,81 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_mc_list *tmp, *mc_to_del;
- kfree(priv->mc_addrs);
- priv->mc_addrs = NULL;
- priv->mc_addrs_cnt = 0;
+ list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
+ list_del(&mc_to_del->list);
+ kfree(mc_to_del);
+ }
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
- char *mc_addrs;
- int mc_addrs_cnt = netdev_mc_count(dev);
- int i;
+ struct mlx4_en_mc_list *tmp;
- mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
- if (!mc_addrs) {
- en_err(priv, "failed to allocate multicast list\n");
- return;
- }
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
mlx4_en_clear_list(dev);
- priv->mc_addrs = mc_addrs;
- priv->mc_addrs_cnt = mc_addrs_cnt;
+ netdev_for_each_mc_addr(ha, dev) {
+ tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
+ if (!tmp) {
+ en_err(priv, "failed to allocate multicast list\n");
+ mlx4_en_clear_list(dev);
+ return;
+ }
+ memcpy(tmp->addr, ha->addr, ETH_ALEN);
+ list_add_tail(&tmp->list, &priv->mc_list);
+ }
}
+static void update_mclist_flags(struct mlx4_en_priv *priv,
+ struct list_head *dst,
+ struct list_head *src)
+{
+ struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
+ bool found;
+
+ /* Find all the entries that should be removed from dst;
+ * these are the entries that are not found in src
+ */
+ list_for_each_entry(dst_tmp, dst, list) {
+ found = false;
+ list_for_each_entry(src_tmp, src, list) {
+ if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ dst_tmp->action = MCLIST_REM;
+ }
+
+ /* Add entries that exist in src but not in dst,
+ * and mark them as needing to be added
+ */
+ list_for_each_entry(src_tmp, src, list) {
+ found = false;
+ list_for_each_entry(dst_tmp, dst, list) {
+ if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ dst_tmp->action = MCLIST_NONE;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
+ GFP_KERNEL);
+ if (!new_mc) {
+ en_err(priv, "Failed to allocate current multicast list\n");
+ return;
+ }
+ memcpy(new_mc, src_tmp,
+ sizeof(struct mlx4_en_mc_list));
+ new_mc->action = MCLIST_ADD;
+ list_add_tail(&new_mc->list, dst);
+ }
+ }
+}
static void mlx4_en_set_multicast(struct net_device *dev)
{
@@ -214,9 +262,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
mcast_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
+ struct mlx4_en_mc_list *mclist, *tmp;
u64 mcast_addr = 0;
u8 mc_list[16] = {0};
- int err;
+ int err = 0;
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
@@ -251,16 +300,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags |= MLX4_EN_FLAG_PROMISC;
/* Enable promiscuous mode */
- if (!(mdev->dev->caps.flags &
- MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 1);
- else
- err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_add(mdev->dev,
+ priv->base_qpn,
priv->port);
- if (err)
- en_err(priv, "Failed enabling "
- "promiscuous mode\n");
+ if (err)
+ en_err(priv, "Failed enabling unicast promiscuous mode\n");
+
+ /* Add the default qp number as multicast
+ * promisc
+ */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed enabling multicast promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ 1);
+ if (err)
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ break;
+ }
/* Disable port multicast filter (unconditionally) */
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
@@ -269,15 +348,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
en_err(priv, "Failed disabling "
"multicast filter\n");
- /* Add the default qp number as multicast promisc */
- if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed entering multicast promisc mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- }
-
/* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
@@ -296,22 +366,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscuous mode */
- if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 0);
- else
- err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
priv->port);
- if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
+ if (err)
+ en_err(priv, "Failed disabling unicast promiscuous mode\n");
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
- /* Disable Multicast promisc */
- if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
- priv->port);
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn, 0);
if (err)
- en_err(priv, "Failed disabling multicast promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ break;
}
/* Enable port VLAN filter */
@@ -329,18 +417,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
/* Add the default qp number as multicast promisc */
if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
- priv->port);
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ break;
+ }
if (err)
en_err(priv, "Failed entering multicast promisc mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
} else {
- int i;
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
- priv->port);
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ break;
+ }
if (err)
en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -351,13 +467,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (err)
en_err(priv, "Failed disabling multicast filter\n");
- /* Detach our qp from all the multicast addresses */
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
- mc_list[5] = priv->port;
- mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, MLX4_PROT_ETH);
- }
/* Flush mcast filter and init it with broadcast address */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
1, MLX4_MCAST_CONFIG);
@@ -367,13 +476,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
netif_tx_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_tx_unlock_bh(dev);
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- mcast_addr =
- mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
- mc_list[5] = priv->port;
- mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, 0, MLX4_PROT_ETH);
+ list_for_each_entry(mclist, &priv->mc_list, list) {
+ mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -381,6 +485,42 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
0, MLX4_MCAST_ENABLE);
if (err)
en_err(priv, "Failed enabling multicast filter\n");
+
+ update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
+ list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
+ if (mclist->action == MCLIST_REM) {
+ /* detach this address and delete from list */
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
+ mc_list[5] = priv->port;
+ err = mlx4_multicast_detach(mdev->dev,
+ &priv->rss_map.indir_qp,
+ mc_list,
+ MLX4_PROT_ETH,
+ mclist->reg_id);
+ if (err)
+ en_err(priv, "Fail to detach multicast address\n");
+
+ /* remove from list */
+ list_del(&mclist->list);
+ kfree(mclist);
+ }
+
+ if (mclist->action == MCLIST_ADD) {
+ /* attach the address */
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
+ /* needed for B0 steering support */
+ mc_list[5] = priv->port;
+ err = mlx4_multicast_attach(mdev->dev,
+ &priv->rss_map.indir_qp,
+ mc_list,
+ priv->port, 0,
+ MLX4_PROT_ETH,
+ &mclist->reg_id);
+ if (err)
+ en_err(priv, "Fail to attach multicast address\n");
+
+ }
+ }
}
out:
mutex_unlock(&mdev->state_lock);
@@ -605,6 +745,9 @@ int mlx4_en_start_port(struct net_device *dev)
return 0;
}
+ INIT_LIST_HEAD(&priv->mc_list);
+ INIT_LIST_HEAD(&priv->curr_list);
+
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
mlx4_en_calc_rx_buf(dev);
@@ -653,6 +796,10 @@ int mlx4_en_start_port(struct net_device *dev)
goto mac_err;
}
+ err = mlx4_en_create_drop_qp(priv);
+ if (err)
+ goto rss_err;
+
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
@@ -720,13 +867,23 @@ int mlx4_en_start_port(struct net_device *dev)
/* Attach rx QP to broadcast address */
memset(&mc_list[10], 0xff, ETH_ALEN);
- mc_list[5] = priv->port;
+ mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
- 0, MLX4_PROT_ETH))
+ priv->port, 0, MLX4_PROT_ETH,
+ &priv->broadcast_id))
mlx4_warn(mdev, "Failed Attaching Broadcast\n");
/* Must redo promiscuous mode setup. */
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ }
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task);
@@ -742,7 +899,8 @@ tx_err:
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
}
-
+ mlx4_en_destroy_drop_qp(priv);
+rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
@@ -760,6 +918,7 @@ void mlx4_en_stop_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_mc_list *mclist, *tmp;
int i;
u8 mc_list[16] = {0};
@@ -778,19 +937,26 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Detach All multicasts */
memset(&mc_list[10], 0xff, ETH_ALEN);
- mc_list[5] = priv->port;
+ mc_list[5] = priv->port; /* needed for B0 steering support */
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
- MLX4_PROT_ETH);
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ MLX4_PROT_ETH, priv->broadcast_id);
+ list_for_each_entry(mclist, &priv->curr_list, list) {
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, MLX4_PROT_ETH);
+ mc_list, MLX4_PROT_ETH, mclist->reg_id);
}
mlx4_en_clear_list(dev);
+ list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
+ list_del(&mclist->list);
+ kfree(mclist);
+ }
+
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+ mlx4_en_destroy_drop_qp(priv);
+
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -929,15 +1095,20 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
if (priv->rx_cq[i].buf)
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
+
+ if (priv->base_tx_qpn) {
+ mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+ priv->base_tx_qpn = 0;
+ }
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
- int base_tx_qpn, err;
+ int err;
- err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+ err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
if (err) {
en_err(priv, "failed reserving range for TX rings\n");
return err;
@@ -949,7 +1120,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
prof->tx_ring_size, i, TX))
goto err;
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
prof->tx_ring_size, TXBB_SIZE))
goto err;
}
@@ -969,7 +1140,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
err:
en_err(priv, "Failed to allocate NIC resources\n");
- mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
return -ENOMEM;
}
@@ -1204,9 +1374,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
/* Configure port */
+ mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
- MLX4_EN_MIN_MTU,
- 0, 0, 0, 0);
+ priv->rx_skb_size + ETH_FCS_LEN,
+ prof->tx_pause, prof->tx_ppp,
+ prof->rx_pause, prof->rx_ppp);
if (err) {
en_err(priv, "Failed setting port general configurations "
"for port %d, with error %d\n", priv->port, err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d49a7ac..a04cbf7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -844,6 +844,36 @@ out:
return err;
}
+int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
+{
+ int err;
+ u32 qpn;
+
+ err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
+ if (err) {
+ en_err(priv, "Failed reserving drop qpn\n");
+ return err;
+ }
+ err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
+ if (err) {
+ en_err(priv, "Failed allocating drop qp\n");
+ mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
+{
+ u32 qpn;
+
+ qpn = priv->drop_qp.qpn;
+ mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
+ mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
+ mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
+}
+
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 9c83bb8..1d70657 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -123,7 +123,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
static const char * const fname[] = {
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
- [2] = "RSS XOR Hash Function support"
+ [2] = "RSS XOR Hash Function support",
+ [3] = "Device manage flow steering support"
};
int i;
@@ -391,6 +392,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
+#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
+#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -474,6 +477,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->num_ports = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
dev_cap->max_msg_sz = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
+ dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
+ dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
@@ -1061,6 +1070,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
+#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
+#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
+#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
+#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
+#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
+#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
+#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
@@ -1119,14 +1137,44 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
- /* multicast attributes */
-
- MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
- MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
- MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ /* steering attributes */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
+ cpu_to_be32(1 <<
+ INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
+
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz,
+ INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz,
+ INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ /* Enable Ethernet flow steering
+ * with udp unicast and tcp unicast
+ */
+ MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ INIT_HCA_FS_ETH_BITS_OFFSET);
+ MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+ INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
+ /* Enable IPoIB flow steering
+ * with udp unicast and tcp unicast
+ */
+ MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ INIT_HCA_FS_IB_BITS_OFFSET);
+ MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+ INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
+ } else {
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_hash_sz,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
+ MLX4_PUT(inbox, (u8) (1 << 3),
+ INIT_HCA_UC_STEERING_OFFSET);
+ }
/* TPT attributes */
@@ -1188,15 +1236,24 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
- /* multicast attributes */
+ /* steering attributes */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
- MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_hash_sz, outbox,
- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ } else {
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_hash_sz, outbox,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ }
/* TPT attributes */
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 64c0399..83fcbbf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -78,6 +78,8 @@ struct mlx4_dev_cap {
u16 wavelength[MLX4_MAX_PORTS + 1];
u64 trans_code[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
+ int fs_log_max_ucast_qp_range_size;
+ int fs_max_num_qp_per_entry;
u64 flags;
u64 flags2;
int reserved_uars;
@@ -165,6 +167,7 @@ struct mlx4_init_hca_param {
u8 log_mpt_sz;
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
+ u8 fs_hash_enable_bits;
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ee6f4fe..4264516 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
+#include <linux/netdevice.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -90,7 +91,9 @@ module_param_named(log_num_mgm_entry_size,
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" of qp per mcg, for example:"
" 10 gives 248.range: 9<="
- " log_num_mgm_entry_size <= 12");
+ " log_num_mgm_entry_size <= 12."
+ " Not in use with device managed"
+ " flow steering");
#define MLX4_VF (1 << 0)
@@ -243,7 +246,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
- dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -274,6 +276,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+ } else {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
+ "set to use B0 steering. Falling back to A0 steering mode.\n");
+ }
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
+ }
+ mlx4_dbg(dev, "Steering mode is: %s\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode));
+
/* Sense port always allowed on supported devices for ConnectX1 and 2 */
if (dev->pdev->device != 0x1003)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -967,9 +991,11 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
}
/*
- * It's not strictly required, but for simplicity just map the
- * whole multicast group table now. The table isn't very big
- * and it's a lot easier than trying to track ref counts.
+ * For device-managed flow steering mode it is required to use
+ * mlx4_init_icm_table. For B0 steering mode it's not strictly
+ * required, but for simplicity just map the whole multicast
+ * group table now. The table isn't very big and it's a lot
+ * easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
init_hca->mc_base,
@@ -1205,7 +1231,26 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw;
}
+ priv->fs_hash_mode = MLX4_FS_L2_HASH;
+
+ switch (priv->fs_hash_mode) {
+ case MLX4_FS_L2_HASH:
+ init_hca.fs_hash_enable_bits = 0;
+ break;
+
+ case MLX4_FS_L2_L3_L4_HASH:
+ /* Enable flow steering with
+ * udp unicast and tcp unicast
+ */
+ init_hca.fs_hash_enable_bits =
+ MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
+ break;
+ }
+
profile = default_profile;
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ profile.num_mcg = MLX4_FS_NUM_MCG;
icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
&init_hca);
@@ -1539,8 +1584,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int nreq = min_t(int, dev->caps.num_ports *
- min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
- + MSIX_LEGACY_SZ, MAX_MSIX);
+ min_t(int, netif_get_num_default_rss_queues() + 1,
+ MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
int err;
int i;
@@ -1975,6 +2020,8 @@ slave_start:
if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
!mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
+ dev->caps.num_comp_vectors = 1;
+ dev->caps.comp_pool = 0;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
}
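
The capability check above picks the steering mode in strict preference order: device-managed flow steering when the FS capability is reported, B0 only when both the unicast and multicast VEP steering flags are present, and A0 otherwise (with a warning if only one of the two flags is set). A small standalone sketch of that decision; the capability bit values here are arbitrary placeholders, not the real ones from mlx4/device.h:

#include <stdio.h>

/* Placeholder capability bits for illustration only. */
#define CAP_FS_EN        (1 << 0)
#define CAP_UC_STEER     (1 << 1)
#define CAP_MC_STEER     (1 << 2)

enum steering_mode { MODE_A0, MODE_B0, MODE_DEVICE_MANAGED };

static enum steering_mode pick_steering_mode(unsigned int caps)
{
        if (caps & CAP_FS_EN)
                return MODE_DEVICE_MANAGED;
        if ((caps & CAP_UC_STEER) && (caps & CAP_MC_STEER))
                return MODE_B0;
        return MODE_A0;         /* partial UC/MC support still falls back here */
}

int main(void)
{
        static const char *names[] = { "A0", "B0", "Device managed" };

        printf("UC only -> %s\n", names[pick_steering_mode(CAP_UC_STEER)]);
        printf("UC|MC   -> %s\n", names[pick_steering_mode(CAP_UC_STEER | CAP_MC_STEER)]);
        printf("FS_EN   -> %s\n", names[pick_steering_mode(CAP_FS_EN)]);
        return 0;
}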
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f4a8f98..bc62f53 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -41,6 +41,7 @@
#define MGM_QPN_MASK 0x00FFFFFF
#define MGM_BLCK_LB_BIT 30
+#define MLX4_MAC_MASK 0xffffffffffffULL
static const u8 zero_gid[16]; /* automatically initialized to 0 */
@@ -54,7 +55,12 @@ struct mlx4_mgm {
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
- return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
+ else
+ return min((1 << mlx4_log_num_mgm_entry_size),
+ MLX4_MAX_MGM_ENTRY_SIZE);
}
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
@@ -62,6 +68,35 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
+static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox,
+ u32 size,
+ u64 *reg_id)
+{
+ u64 imm;
+ int err = 0;
+
+ err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ *reg_id = imm;
+
+ return err;
+}
+
+static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
+{
+ int err = 0;
+
+ err = mlx4_cmd(dev, regid, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ return err;
+}
+
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
@@ -614,6 +649,311 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
return err;
}
+struct mlx4_net_trans_rule_hw_ctrl {
+ __be32 ctrl;
+ __be32 vf_vep_port;
+ __be32 qpn;
+ __be32 reserved;
+};
+
+static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
+ struct mlx4_net_trans_rule_hw_ctrl *hw)
+{
+ static const u8 __promisc_mode[] = {
+ [MLX4_FS_PROMISC_NONE] = 0x0,
+ [MLX4_FS_PROMISC_UPLINK] = 0x1,
+ [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
+ [MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
+ };
+
+ u32 dw = 0;
+
+ dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+ dw |= ctrl->exclusive ? (1 << 2) : 0;
+ dw |= ctrl->allow_loopback ? (1 << 3) : 0;
+ dw |= __promisc_mode[ctrl->promisc_mode] << 8;
+ dw |= ctrl->priority << 16;
+
+ hw->ctrl = cpu_to_be32(dw);
+ hw->vf_vep_port = cpu_to_be32(ctrl->port);
+ hw->qpn = cpu_to_be32(ctrl->qpn);
+}
+
+struct mlx4_net_trans_rule_hw_ib {
+ u8 size;
+ u8 rsvd1;
+ __be16 id;
+ u32 rsvd2;
+ __be32 qpn;
+ __be32 qpn_mask;
+ u8 dst_gid[16];
+ u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ u8 rsvd1[6];
+ u8 dst_mac[6];
+ u16 rsvd2;
+ u8 dst_mac_msk[6];
+ u16 rsvd3;
+ u8 src_mac[6];
+ u16 rsvd4;
+ u8 src_mac_msk[6];
+ u8 rsvd5;
+ u8 ether_type_enable;
+ __be16 ether_type;
+ __be16 vlan_id_msk;
+ __be16 vlan_id;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be16 rsvd1[3];
+ __be16 dst_port;
+ __be16 rsvd2;
+ __be16 dst_port_msk;
+ __be16 rsvd3;
+ __be16 src_port;
+ __be16 rsvd4;
+ __be16 src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be32 rsvd1;
+ __be32 dst_ip;
+ __be32 dst_ip_msk;
+ __be32 src_ip;
+ __be32 src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+ union {
+ struct {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ };
+ struct mlx4_net_trans_rule_hw_eth eth;
+ struct mlx4_net_trans_rule_hw_ib ib;
+ struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+ struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+ };
+};
+
+static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
+ struct _rule_hw *rule_hw)
+{
+ static const u16 __sw_id_hw[] = {
+ [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001,
+ [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005,
+ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
+ [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
+ [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
+ [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
+ };
+
+ static const size_t __rule_hw_sz[] = {
+ [MLX4_NET_TRANS_RULE_ID_ETH] =
+ sizeof(struct mlx4_net_trans_rule_hw_eth),
+ [MLX4_NET_TRANS_RULE_ID_IB] =
+ sizeof(struct mlx4_net_trans_rule_hw_ib),
+ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+ [MLX4_NET_TRANS_RULE_ID_IPV4] =
+ sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+ [MLX4_NET_TRANS_RULE_ID_TCP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+ [MLX4_NET_TRANS_RULE_ID_UDP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+ };
+ if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
+ mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+ return -EINVAL;
+ }
+ memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+ rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
+ rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+
+ switch (spec->id) {
+ case MLX4_NET_TRANS_RULE_ID_ETH:
+ memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
+ memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
+ ETH_ALEN);
+ memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
+ memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
+ ETH_ALEN);
+ if (spec->eth.ether_type_enable) {
+ rule_hw->eth.ether_type_enable = 1;
+ rule_hw->eth.ether_type = spec->eth.ether_type;
+ }
+ rule_hw->eth.vlan_id = spec->eth.vlan_id;
+ rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ rule_hw->ib.qpn = spec->ib.r_qpn;
+ rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
+ memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
+ memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV6:
+ return -EOPNOTSUPP;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV4:
+ rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
+ rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
+ rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
+ rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_TCP:
+ case MLX4_NET_TRANS_RULE_ID_UDP:
+ rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
+ rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
+ rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
+ rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return __rule_hw_sz[spec->id];
+}
+
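To put numbers on what parse_trans_rule() returns (a straight byte count of the packed structures above, for illustration): the control header mlx4_net_trans_rule_hw_ctrl is 16 bytes and mlx4_net_trans_rule_hw_eth is 48 bytes, so a rule carrying a single L2 spec gets a spec size field of 48 >> 2 = 12 dwords and is handed to the firmware by mlx4_flow_attach() below as (16 + 48) >> 2 = 16 dwords in total.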
+static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
+ struct mlx4_net_trans_rule *rule)
+{
+#define BUF_SIZE 256
+ struct mlx4_spec_list *cur;
+ char buf[BUF_SIZE];
+ int len = 0;
+
+ mlx4_err(dev, "%s", str);
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "port = %d prio = 0x%x qp = 0x%x ",
+ rule->port, rule->priority, rule->qpn);
+
+ list_for_each_entry(cur, &rule->list, list) {
+ switch (cur->id) {
+ case MLX4_NET_TRANS_RULE_ID_ETH:
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dmac = %pM ", &cur->eth.dst_mac);
+ if (cur->eth.ether_type)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "ethertype = 0x%x ",
+ be16_to_cpu(cur->eth.ether_type));
+ if (cur->eth.vlan_id)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "vlan-id = %d ",
+ be16_to_cpu(cur->eth.vlan_id));
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV4:
+ if (cur->ipv4.src_ip)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "src-ip = %pI4 ",
+ &cur->ipv4.src_ip);
+ if (cur->ipv4.dst_ip)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-ip = %pI4 ",
+ &cur->ipv4.dst_ip);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_TCP:
+ case MLX4_NET_TRANS_RULE_ID_UDP:
+ if (cur->tcp_udp.src_port)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "src-port = %d ",
+ be16_to_cpu(cur->tcp_udp.src_port));
+ if (cur->tcp_udp.dst_port)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-port = %d ",
+ be16_to_cpu(cur->tcp_udp.dst_port));
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-gid = %pI6\n", cur->ib.dst_gid);
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-gid-mask = %pI6\n",
+ cur->ib.dst_gid_msk);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV6:
+ break;
+
+ default:
+ break;
+ }
+ }
+ len += snprintf(buf + len, BUF_SIZE - len, "\n");
+ mlx4_err(dev, "%s", buf);
+
+ if (len >= BUF_SIZE)
+ mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+}
+
+int mlx4_flow_attach(struct mlx4_dev *dev,
+ struct mlx4_net_trans_rule *rule, u64 *reg_id)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_spec_list *cur;
+ u32 size = 0;
+ int ret;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
+ trans_rule_ctrl_to_hw(rule, mailbox->buf);
+
+ size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+
+ list_for_each_entry(cur, &rule->list, list) {
+ ret = parse_trans_rule(dev, cur, mailbox->buf + size);
+ if (ret < 0) {
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return -EINVAL;
+ }
+ size += ret;
+ }
+
+ ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
+ if (ret == -ENOMEM)
+ mlx4_err_rule(dev,
+ "mcg table is full. Fail to register network rule.\n",
+ rule);
+ else if (ret)
+ mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_attach);
+
+int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
+{
+ int err;
+
+ err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
+ if (err)
+ mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
+ reg_id);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+
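A minimal caller-side sketch of the new attach/detach pair (illustrative only: dev, port, qpn and mac[] are assumed to be in scope, mirroring what mlx4_uc_steer_add() and mlx4_multicast_attach() do later in this patch):

        struct mlx4_spec_list spec = { .id = MLX4_NET_TRANS_RULE_ID_ETH };
        struct mlx4_net_trans_rule rule = {
                .queue_mode     = MLX4_NET_TRANS_Q_FIFO,
                .exclusive      = 0,
                .allow_loopback = 1,
                .promisc_mode   = MLX4_FS_PROMISC_NONE,
                .priority       = MLX4_DOMAIN_NIC,
        };
        u64 reg_id;
        int err;

        INIT_LIST_HEAD(&rule.list);
        rule.port = port;
        rule.qpn = qpn;

        /* match one destination MAC exactly */
        memcpy(spec.eth.dst_mac, mac, ETH_ALEN);
        memset(spec.eth.dst_mac_msk, 0xff, ETH_ALEN);
        list_add_tail(&spec.list, &rule.list);

        err = mlx4_flow_attach(dev, &rule, &reg_id);
        if (err)
                return err;

        /* ... and once the steering entry is no longer needed: */
        err = mlx4_flow_detach(dev, reg_id);

The returned reg_id is the firmware's handle for the rule; callers in this patch store it (mlx4_mac_entry.reg_id, mlx4_en_mc_list.reg_id) so the exact rule can be torn down later.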
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer)
@@ -866,49 +1206,159 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_mcast_loopback, enum mlx4_protocol prot)
+ u8 port, int block_mcast_loopback,
+ enum mlx4_protocol prot, u64 *reg_id)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
- if (prot == MLX4_PROT_ETH)
- gid[7] |= (MLX4_MC_STEER << 1);
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ if (prot == MLX4_PROT_ETH)
+ return 0;
+
+ case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_MC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+ return mlx4_qp_attach_common(dev, qp, gid,
+ block_mcast_loopback, prot,
+ MLX4_MC_STEER);
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.allow_loopback = !block_mcast_loopback;
+ rule.port = port;
+ rule.qpn = qp->qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ switch (prot) {
+ case MLX4_PROT_ETH:
+ spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
+ memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ break;
- if (mlx4_is_mfunc(dev))
- return mlx4_QP_ATTACH(dev, qp, gid, 1,
- block_mcast_loopback, prot);
+ case MLX4_PROT_IB_IPV6:
+ spec.id = MLX4_NET_TRANS_RULE_ID_IB;
+ memcpy(spec.ib.dst_gid, gid, 16);
+ memset(&spec.ib.dst_gid_msk, 0xff, 16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ list_add_tail(&spec.list, &rule.list);
- return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
- prot, MLX4_MC_STEER);
+ return mlx4_flow_attach(dev, &rule, reg_id);
+ }
+
+ default:
+ return -EINVAL;
+ }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- enum mlx4_protocol prot)
+ enum mlx4_protocol prot, u64 reg_id)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ if (prot == MLX4_PROT_ETH)
+ return 0;
- if (prot == MLX4_PROT_ETH)
- gid[7] |= (MLX4_MC_STEER << 1);
+ case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_MC_STEER << 1);
- if (mlx4_is_mfunc(dev))
- return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot,
+ MLX4_MC_STEER);
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ return mlx4_flow_detach(dev, reg_id);
- return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER);
+ default:
+ return -EINVAL;
+ }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
+ u32 qpn, enum mlx4_net_trans_promisc_mode mode)
+{
+ struct mlx4_net_trans_rule rule;
+ u64 *regid_p;
+
+ switch (mode) {
+ case MLX4_FS_PROMISC_UPLINK:
+ case MLX4_FS_PROMISC_FUNCTION_PORT:
+ regid_p = &dev->regid_promisc_array[port];
+ break;
+ case MLX4_FS_PROMISC_ALL_MULTI:
+ regid_p = &dev->regid_allmulti_array[port];
+ break;
+ default:
+ return -1;
+ }
+
+ if (*regid_p != 0)
+ return -1;
+
+ rule.promisc_mode = mode;
+ rule.port = port;
+ rule.qpn = qpn;
+ INIT_LIST_HEAD(&rule.list);
+ mlx4_err(dev, "going promisc on %x\n", port);
+
+ return mlx4_flow_attach(dev, &rule, regid_p);
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
+
+int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
+ enum mlx4_net_trans_promisc_mode mode)
+{
+ int ret;
+ u64 *regid_p;
+
+ switch (mode) {
+ case MLX4_FS_PROMISC_UPLINK:
+ case MLX4_FS_PROMISC_FUNCTION_PORT:
+ regid_p = &dev->regid_promisc_array[port];
+ break;
+ case MLX4_FS_PROMISC_ALL_MULTI:
+ regid_p = &dev->regid_allmulti_array[port];
+ break;
+ default:
+ return -1;
+ }
+
+ if (*regid_p == 0)
+ return -1;
+
+ ret = mlx4_flow_detach(dev, *regid_p);
+ if (ret == 0)
+ *regid_p = 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
+
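And a matching sketch for the promiscuous helpers (again illustrative; dev, port and qpn assumed in scope). Add and remove must use the same mode and port, since the registration id is parked in the per-port regid_promisc_array / regid_allmulti_array slot:

        int err;

        err = mlx4_flow_steer_promisc_add(dev, port, qpn,
                                          MLX4_FS_PROMISC_UPLINK);
        if (err)
                return err;

        /* ... later, drop uplink promiscuous mode again */
        err = mlx4_flow_steer_promisc_remove(dev, port,
                                             MLX4_FS_PROMISC_UPLINK);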
int mlx4_unicast_attach(struct mlx4_dev *dev,
struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
@@ -924,10 +1374,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], enum mlx4_protocol prot)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
@@ -968,9 +1414,6 @@ static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
@@ -980,9 +1423,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
@@ -992,9 +1432,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
@@ -1004,9 +1441,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
@@ -1019,6 +1453,10 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ /* No need for mcg_table when fw manages the mcg table */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return 0;
err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
dev->caps.num_amgms - 1, 0, 0);
if (err)
@@ -1031,5 +1469,7 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
- mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e5d2022..d2c436b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -39,6 +39,7 @@
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
@@ -53,6 +54,17 @@
#define DRV_VERSION "1.1"
#define DRV_RELDATE "Dec, 2011"
+#define MLX4_FS_UDP_UC_EN (1 << 1)
+#define MLX4_FS_TCP_UC_EN (1 << 2)
+#define MLX4_FS_NUM_OF_L2_ADDR 8
+#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
+#define MLX4_FS_NUM_MCG (1 << 17)
+
+enum {
+ MLX4_FS_L2_HASH = 0,
+ MLX4_FS_L2_L3_L4_HASH,
+};
+
#define MLX4_NUM_UP 8
#define MLX4_NUM_TC 8
#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -137,6 +149,7 @@ enum mlx4_resource {
RES_VLAN,
RES_EQ,
RES_COUNTER,
+ RES_FS_RULE,
MLX4_NUM_OF_RESOURCE_TYPE
};
@@ -509,7 +522,7 @@ struct slave_list {
struct mlx4_resource_tracker {
spinlock_t lock;
/* tree for each resources */
- struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+ struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
/* num_of_slave's lists, one per slave */
struct slave_list *slave_list;
};
@@ -703,6 +716,7 @@ struct mlx4_set_port_rqp_calc_context {
struct mlx4_mac_entry {
u64 mac;
+ u64 reg_id;
};
struct mlx4_port_info {
@@ -776,6 +790,7 @@ struct mlx4_priv {
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
int reserved_mtts;
+ int fs_hash_mode;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -1032,7 +1047,7 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
/* resource tracker functions*/
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource resource_type,
- int resource_id, int *slave);
+ u64 resource_id, int *slave);
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
int mlx4_init_resource_tracker(struct mlx4_dev *dev);
@@ -1117,6 +1132,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6ae3509..a126321 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -75,6 +75,7 @@
#define STAMP_SHIFT 31
#define STAMP_VAL 0x7fffffff
#define STATS_DELAY (HZ / 4)
+#define MAX_NUM_OF_FS_RULES 256
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE 512
@@ -404,6 +405,19 @@ struct mlx4_en_perf_stats {
#define NUM_PERF_COUNTERS 6
};
+enum mlx4_en_mclist_act {
+ MCLIST_NONE,
+ MCLIST_REM,
+ MCLIST_ADD,
+};
+
+struct mlx4_en_mc_list {
+ struct list_head list;
+ enum mlx4_en_mclist_act action;
+ u8 addr[ETH_ALEN];
+ u64 reg_id;
+};
+
struct mlx4_en_frag_info {
u16 frag_size;
u16 frag_prefix_size;
@@ -422,6 +436,11 @@ struct mlx4_en_frag_info {
#endif
+struct ethtool_flow_id {
+ struct ethtool_rx_flow_spec flow_spec;
+ u64 id;
+};
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -431,6 +450,7 @@ struct mlx4_en_priv {
struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
+ struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
unsigned long last_moder_packets[MAX_RX_RINGS];
unsigned long last_moder_tx_packets;
@@ -480,6 +500,7 @@ struct mlx4_en_priv {
struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct mlx4_qp drop_qp;
struct work_struct mcast_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
@@ -489,12 +510,14 @@ struct mlx4_en_priv {
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
u64 stats_bitmap;
- char *mc_addrs;
- int mc_addrs_cnt;
+ struct list_head mc_list;
+ struct list_head curr_list;
+ u64 broadcast_id;
struct mlx4_en_stat_out_mbox hw_stats;
int vids[128];
bool wol;
struct device *ddev;
+ int base_tx_qpn;
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
@@ -564,6 +587,8 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
+void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 1fe2c7a..a51d1b9b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -75,21 +75,54 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
+ u64 mac, int *qpn, u64 *reg_id)
{
- struct mlx4_qp qp;
- u8 gid[16] = {0};
__be64 be_mac;
int err;
- qp.qpn = *qpn;
-
- mac &= 0xffffffffffffULL;
+ mac &= MLX4_MAC_MASK;
be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
- err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = *qpn;
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
+ gid[5] = port;
+
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec_eth = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.port = port;
+ rule.qpn = *qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ list_add_tail(&spec_eth.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
if (err)
mlx4_warn(dev, "Failed Attaching Unicast\n");
@@ -97,19 +130,30 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn)
+ u64 mac, int qpn, u64 reg_id)
{
- struct mlx4_qp qp;
- u8 gid[16] = {0};
- __be64 be_mac;
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+ __be64 be_mac;
- qp.qpn = qpn;
- mac &= 0xffffffffffffULL;
- be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
+ qp.qpn = qpn;
+ mac &= MLX4_MAC_MASK;
+ be_mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
+ gid[5] = port;
- mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ mlx4_flow_detach(dev, reg_id);
+ break;
+ }
+ default:
+ mlx4_err(dev, "Invalid steering mode.\n");
+ }
}
static int validate_index(struct mlx4_dev *dev,
@@ -144,6 +188,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
+ u64 reg_id;
mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
(unsigned long long) mac);
@@ -155,7 +200,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
return err;
}
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
*qpn = info->base_qpn + index;
return 0;
}
@@ -167,7 +212,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
goto qp_err;
}
- err = mlx4_uc_steer_add(dev, port, mac, qpn);
+ err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
if (err)
goto steer_err;
@@ -177,6 +222,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
goto alloc_err;
}
entry->mac = mac;
+ entry->reg_id = reg_id;
err = radix_tree_insert(&info->mac_tree, *qpn, entry);
if (err)
goto insert_err;
@@ -186,7 +232,7 @@ insert_err:
kfree(entry);
alloc_err:
- mlx4_uc_steer_release(dev, port, mac, *qpn);
+ mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
steer_err:
mlx4_qp_release_range(dev, *qpn, 1);
@@ -206,13 +252,14 @@ void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
(unsigned long long) mac);
mlx4_unregister_mac(dev, port, mac);
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (entry) {
mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
" qpn %d\n", port,
(unsigned long long) mac, qpn);
- mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac,
+ qpn, entry->reg_id);
mlx4_qp_release_range(dev, qpn, 1);
radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
@@ -359,15 +406,18 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
int index = qpn - info->base_qpn;
int err = 0;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
- mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac,
+ qpn, entry->reg_id);
mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
+ entry->reg_id = 0;
mlx4_register_mac(dev, port, new_mac);
- err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+ err = mlx4_uc_steer_add(dev, port, entry->mac,
+ &qpn, &entry->reg_id);
return err;
}
@@ -697,10 +747,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
if (slave != dev->caps.function)
memset(inbox->buf, 0, 256);
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
- *(u8 *) inbox->buf = !!reset_qkey_viols << 6;
+ *(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
((__be32 *) inbox->buf)[2] = agg_cap_mask;
} else {
- ((u8 *) inbox->buf)[3] = !!reset_qkey_viols;
+ ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
((__be32 *) inbox->buf)[1] = agg_cap_mask;
}
@@ -803,8 +853,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
MCAST_DIRECT : MCAST_DEFAULT;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b83bc92..9ee4725 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -237,13 +237,19 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->mtt_base = profile[i].start;
break;
case MLX4_RES_MCG:
- dev->caps.num_mgms = profile[i].num >> 1;
- dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
init_hca->log_mc_entry_sz =
ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
- init_hca->log_mc_hash_sz = profile[i].log_num - 1;
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ dev->caps.num_mgms = profile[i].num;
+ } else {
+ init_hca->log_mc_hash_sz =
+ profile[i].log_num - 1;
+ dev->caps.num_mgms = profile[i].num >> 1;
+ dev->caps.num_amgms = profile[i].num >> 1;
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b45d0e7..c3fa919 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -57,7 +57,8 @@ struct mac_res {
struct res_common {
struct list_head list;
- u32 res_id;
+ struct rb_node node;
+ u64 res_id;
int owner;
int state;
int from_state;
@@ -189,6 +190,58 @@ struct res_xrcdn {
int port;
};
+enum res_fs_rule_states {
+ RES_FS_RULE_BUSY = RES_ANY_BUSY,
+ RES_FS_RULE_ALLOCATED,
+};
+
+struct res_fs_rule {
+ struct res_common com;
+};
+
+static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct res_common *res = container_of(node, struct res_common,
+ node);
+
+ if (res_id < res->res_id)
+ node = node->rb_left;
+ else if (res_id > res->res_id)
+ node = node->rb_right;
+ else
+ return res;
+ }
+ return NULL;
+}
+
+static int res_tracker_insert(struct rb_root *root, struct res_common *res)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct res_common *this = container_of(*new, struct res_common,
+ node);
+
+ parent = *new;
+ if (res->res_id < this->res_id)
+ new = &((*new)->rb_left);
+ else if (res->res_id > this->res_id)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&res->node, parent, new);
+ rb_insert_color(&res->node, root);
+
+ return 0;
+}
+
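A short sketch of how the two helpers above pair up (illustrative; tracker, reg_id and err are assumed to be in scope, and the entry would come from alloc_fs_rule_tr() or one of its sibling allocators further down). Keying the tracker on a u64 res_id in an rb_root is what lets the 64-bit flow-steering registration ids be tracked alongside QPs, CQs and the other resources:

        struct res_common *res = alloc_fs_rule_tr(reg_id);

        if (!res)
                return -ENOMEM;

        err = res_tracker_insert(&tracker->res_tree[RES_FS_RULE], res);
        if (err)        /* -EEXIST: this res_id is already tracked */
                kfree(res);

        /* later: O(log n) lookup by the same 64-bit id */
        res = res_tracker_lookup(&tracker->res_tree[RES_FS_RULE], reg_id);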
/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
@@ -201,6 +254,7 @@ static const char *ResourceType(enum mlx4_resource rt)
case RES_MAC: return "RES_MAC";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
+ case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
};
@@ -228,8 +282,7 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
dev->num_slaves);
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
- INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
- GFP_ATOMIC|__GFP_NOWARN);
+ priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
return 0 ;
@@ -277,11 +330,11 @@ static void *find_res(struct mlx4_dev *dev, int res_id,
{
struct mlx4_priv *priv = mlx4_priv(dev);
- return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
- res_id);
+ return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+ res_id);
}
-static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type,
void *res)
{
@@ -307,7 +360,7 @@ static int get_res(struct mlx4_dev *dev, int slave, int res_id,
r->from_state = r->state;
r->state = RES_ANY_BUSY;
- mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+ mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
ResourceType(type), r->res_id);
if (res)
@@ -320,7 +373,7 @@ exit:
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
- int res_id, int *slave)
+ u64 res_id, int *slave)
{
struct res_common *r;
@@ -341,7 +394,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
return err;
}
-static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type)
{
struct res_common *r;
@@ -473,7 +526,21 @@ static struct res_common *alloc_xrcdn_tr(int id)
return &ret->com;
}
-static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+static struct res_common *alloc_fs_rule_tr(u64 id)
+{
+ struct res_fs_rule *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_FS_RULE_ALLOCATED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
int extra)
{
struct res_common *ret;
@@ -506,6 +573,9 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
break;
+ case RES_FS_RULE:
+ ret = alloc_fs_rule_tr(id);
+ break;
default:
return NULL;
}
@@ -515,7 +585,7 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
return ret;
}
-static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
int i;
@@ -523,7 +593,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
struct mlx4_priv *priv = mlx4_priv(dev);
struct res_common **res_arr;
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
- struct radix_tree_root *root = &tracker->res_tree[type];
+ struct rb_root *root = &tracker->res_tree[type];
res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
if (!res_arr)
@@ -546,7 +616,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
err = -EEXIST;
goto undo;
}
- err = radix_tree_insert(root, base + i, res_arr[i]);
+ err = res_tracker_insert(root, res_arr[i]);
if (err)
goto undo;
list_add_tail(&res_arr[i]->list,
@@ -559,7 +629,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
undo:
for (--i; i >= base; --i)
- radix_tree_delete(&tracker->res_tree[type], i);
+ rb_erase(&res_arr[i]->node, root);
spin_unlock_irq(mlx4_tlock(dev));
@@ -638,6 +708,16 @@ static int remove_xrcdn_ok(struct res_xrcdn *res)
return 0;
}
+static int remove_fs_rule_ok(struct res_fs_rule *res)
+{
+ if (res->com.state == RES_FS_RULE_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_FS_RULE_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
static int remove_cq_ok(struct res_cq *res)
{
if (res->com.state == RES_CQ_BUSY)
@@ -679,15 +759,17 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
return remove_counter_ok((struct res_counter *)res);
case RES_XRCD:
return remove_xrcdn_ok((struct res_xrcdn *)res);
+ case RES_FS_RULE:
+ return remove_fs_rule_ok((struct res_fs_rule *)res);
default:
return -EINVAL;
}
}
-static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
- int i;
+ u64 i;
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -695,7 +777,7 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
spin_lock_irq(mlx4_tlock(dev));
for (i = base; i < base + count; ++i) {
- r = radix_tree_lookup(&tracker->res_tree[type], i);
+ r = res_tracker_lookup(&tracker->res_tree[type], i);
if (!r) {
err = -ENOENT;
goto out;
@@ -710,8 +792,8 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
}
for (i = base; i < base + count; ++i) {
- r = radix_tree_lookup(&tracker->res_tree[type], i);
- radix_tree_delete(&tracker->res_tree[type], i);
+ r = res_tracker_lookup(&tracker->res_tree[type], i);
+ rb_erase(&r->node, &tracker->res_tree[type]);
list_del(&r->list);
kfree(r);
}
@@ -733,7 +815,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+ r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -741,7 +823,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
else {
switch (state) {
case RES_QP_BUSY:
- mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+ mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
__func__, r->com.res_id);
err = -EBUSY;
break;
@@ -750,7 +832,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
if (r->com.state == RES_QP_MAPPED && !alloc)
break;
- mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+ mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
err = -EINVAL;
break;
@@ -759,7 +841,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
r->com.state == RES_QP_HW)
break;
else {
- mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+ mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
r->com.res_id);
err = -EINVAL;
}
@@ -779,7 +861,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
r->com.to_state = state;
r->com.state = RES_QP_BUSY;
if (qp)
- *qp = (struct res_qp *)r;
+ *qp = r;
}
}
@@ -797,7 +879,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -832,7 +914,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.to_state = state;
r->com.state = RES_MPT_BUSY;
if (mpt)
- *mpt = (struct res_mpt *)r;
+ *mpt = r;
}
}
@@ -850,7 +932,7 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -898,7 +980,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
int err;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+ r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -952,7 +1034,7 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -1001,7 +1083,7 @@ static void res_abort_move(struct mlx4_dev *dev, int slave,
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[type], id);
+ r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->from_state;
spin_unlock_irq(mlx4_tlock(dev));
@@ -1015,7 +1097,7 @@ static void res_end_move(struct mlx4_dev *dev, int slave,
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[type], id);
+ r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->to_state;
spin_unlock_irq(mlx4_tlock(dev));
@@ -2695,6 +2777,60 @@ ex_put:
return err;
}
+int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
+ vhcr->in_modifier, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+
+ err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
+ if (err) {
+ mlx4_err(dev, "Fail to add flow steering resources.\n ");
+ /* detach rule*/
+ mlx4_cmd(dev, vhcr->out_param, 0, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ }
+ return err;
+}
+
+int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
+ if (err) {
+ mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+ return err;
+ }
+
+ err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ return err;
+}
+
enum {
BUSY_MAX_RETRIES = 10
};
@@ -2751,7 +2887,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
if (r->state == RES_ANY_BUSY) {
if (print)
mlx4_dbg(dev,
- "%s id 0x%x is busy\n",
+ "%s id 0x%llx is busy\n",
ResourceType(type),
r->res_id);
++busy;
@@ -2817,8 +2953,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
switch (state) {
case RES_QP_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_QP],
- qp->com.res_id);
+ rb_erase(&qp->com.node,
+ &tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(qp);
@@ -2888,8 +3024,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
case RES_SRQ_ALLOCATED:
__mlx4_srq_free_icm(dev, srqn);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_SRQ],
- srqn);
+ rb_erase(&srq->com.node,
+ &tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(srq);
@@ -2954,8 +3090,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
case RES_CQ_ALLOCATED:
__mlx4_cq_free_icm(dev, cqn);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_CQ],
- cqn);
+ rb_erase(&cq->com.node,
+ &tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(cq);
@@ -3017,8 +3153,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
case RES_MPT_RESERVED:
__mlx4_mr_release(dev, mpt->key);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_MPT],
- mptn);
+ rb_erase(&mpt->com.node,
+ &tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(mpt);
@@ -3086,8 +3222,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
__mlx4_free_mtt_range(dev, base,
mtt->order);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_MTT],
- base);
+ rb_erase(&mtt->com.node,
+ &tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(mtt);
@@ -3104,6 +3240,58 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
spin_unlock_irq(mlx4_tlock(dev));
}
+static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *fs_rule_list =
+ &tracker->slave_list[slave].res_list[RES_FS_RULE];
+ struct res_fs_rule *fs_rule;
+ struct res_fs_rule *tmp;
+ int state;
+ u64 base;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_FS_RULE);
+ if (err)
+ mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
+ slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (fs_rule->com.owner == slave) {
+ base = fs_rule->com.res_id;
+ state = fs_rule->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_FS_RULE_ALLOCATED:
+ /* detach rule */
+ err = mlx4_cmd(dev, base, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ rb_erase(&fs_rule->com.node,
+ &tracker->res_tree[RES_FS_RULE]);
+ list_del(&fs_rule->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(fs_rule);
+ state = 0;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3133,8 +3321,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
switch (state) {
case RES_EQ_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_EQ],
- eqn);
+ rb_erase(&eq->com.node,
+ &tracker->res_tree[RES_EQ]);
list_del(&eq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(eq);
@@ -3191,7 +3379,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
if (counter->com.owner == slave) {
index = counter->com.res_id;
- radix_tree_delete(&tracker->res_tree[RES_COUNTER], index);
+ rb_erase(&counter->com.node,
+ &tracker->res_tree[RES_COUNTER]);
list_del(&counter->com.list);
kfree(counter);
__mlx4_counter_free(dev, index);
@@ -3220,7 +3409,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
if (xrcd->com.owner == slave) {
xrcdn = xrcd->com.res_id;
- radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn);
+ rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
list_del(&xrcd->com.list);
kfree(xrcd);
__mlx4_xrcd_free(dev, xrcdn);
@@ -3244,5 +3433,6 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
rem_slave_mtts(dev, slave);
rem_slave_counters(dev, slave);
rem_slave_xrcdns(dev, slave);
+ rem_slave_fs_rule(dev, slave);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 5ffde23..59ef568 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -16,8 +16,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/**
- * Supports:
+/* Supports:
* KS8851 16bit MLL chip from Micrel Inc.
*/
@@ -35,7 +34,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/ks8851_mll.h>
#define DRV_NAME "ks8851_mll"
@@ -465,8 +464,7 @@ static int msg_enable;
#define BE1 0x2000 /* Byte Enable 1 */
#define BE0 0x1000 /* Byte Enable 0 */
-/**
- * register read/write calls.
+/* register read/write calls.
*
* All these calls issue transactions to access the chip's registers. They
* all require that the necessary lock is held to prevent accesses when the
@@ -1103,7 +1101,7 @@ static void ks_set_grpaddr(struct ks_net *ks)
}
} /* ks_set_grpaddr */
-/*
+/**
* ks_clear_mcast - clear multicast information
*
* @ks : The chip information
@@ -1515,6 +1513,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
struct net_device *netdev;
struct ks_net *ks;
u16 id, data;
+ struct ks8851_mll_platform_data *pdata;
io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1596,17 +1595,27 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
ks_disable_qmu(ks);
ks_setup(ks);
ks_setup_int(ks);
- memcpy(netdev->dev_addr, ks->mac_addr, 6);
data = ks_rdreg16(ks, KS_OBCR);
ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
- /**
- * If you want to use the default MAC addr,
- * comment out the 2 functions below.
- */
+ /* overwriting the default MAC address */
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ netdev_err(netdev, "No platform data\n");
+ err = -ENODEV;
+ goto err_pdata;
+ }
+ memcpy(ks->mac_addr, pdata->mac_addr, 6);
+ if (!is_valid_ether_addr(ks->mac_addr)) {
+ /* Use random MAC address if none passed */
+ random_ether_addr(ks->mac_addr);
+ netdev_info(netdev, "Using random mac address\n");
+ }
+ netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
+
+ memcpy(netdev->dev_addr, ks->mac_addr, 6);
- random_ether_addr(netdev->dev_addr);
ks_set_mac(ks, netdev->dev_addr);
id = ks_rdreg16(ks, KS_CIDER);
@@ -1615,6 +1624,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
+err_pdata:
+ unregister_netdev(netdev);
err_register:
err_get_irq:
iounmap(ks->hw_addr_cmd);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index eaf9ff0..318fee9 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3913,7 +3913,7 @@ static void hw_start_rx(struct ksz_hw *hw)
hw->rx_stop = 2;
}
-/*
+/**
* hw_stop_rx - stop receiving
* @hw: The hardware instance.
*
@@ -4480,14 +4480,12 @@ static void ksz_init_rx_buffers(struct dev_info *adapter)
dma_buf->len = adapter->mtu;
if (!dma_buf->skb)
dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
- if (dma_buf->skb && !dma_buf->dma) {
- dma_buf->skb->dev = adapter->dev;
+ if (dma_buf->skb && !dma_buf->dma)
dma_buf->dma = pci_map_single(
adapter->pdev,
skb_tail_pointer(dma_buf->skb),
dma_buf->len,
PCI_DMA_FROMDEVICE);
- }
/* Set descriptor. */
set_rx_buf(desc, dma_buf->dma);
@@ -4881,8 +4879,8 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
left = hw_alloc_pkt(hw, skb->len, num);
if (left) {
if (left < num ||
- ((CHECKSUM_PARTIAL == skb->ip_summed) &&
- (ETH_P_IPV6 == htons(skb->protocol)))) {
+ (CHECKSUM_PARTIAL == skb->ip_summed &&
+ skb->protocol == htons(ETH_P_IPV6))) {
struct sk_buff *org_skb = skb;
skb = netdev_alloc_skb(dev, org_skb->len);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 90153fc..fa85cf1 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3775,7 +3775,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->num_slices = 1;
msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- ncpus = num_online_cpus();
+ ncpus = netif_get_num_default_rss_queues();
if (myri10ge_max_slices == 1 || msix_cap == 0 ||
(myri10ge_max_slices == -1 && ncpus < 2))
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bb36758..d958c22 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3377,7 +3377,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
} while (cnt < 20);
return ret;
}
-/*
+/**
* check_pci_device_id - Checks if the device id is supported
* @id : device id
* Description: Function to check if the pci device id is supported by driver.
@@ -5238,7 +5238,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
}
/**
- * s2io_set_mac_addr driver entry point
+ * s2io_set_mac_addr - driver entry point
*/
static int s2io_set_mac_addr(struct net_device *dev, void *p)
@@ -6088,7 +6088,7 @@ static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
}
/**
- * s2io-link_test - verifies the link state of the nic
+ * s2io_link_test - verifies the link state of the nic
* @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data: variable that returns the result of each of the test conducted by
@@ -6116,9 +6116,9 @@ static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
/**
* s2io_rldram_test - offline test for access to the RldRam chip on the NIC
- * @sp - private member of the device structure, which is a pointer to the
+ * @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @data - variable that returns the result of each of the test
+ * @data: variable that returns the result of each of the test
* conducted by the driver.
* Description:
* This is one of the offline test that tests the read and write
@@ -6946,9 +6946,9 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
if (sp->rxd_mode == RXD_MODE_3B)
ba = &ring->ba[j][k];
if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
- (u64 *)&temp0_64,
- (u64 *)&temp1_64,
- (u64 *)&temp2_64,
+ &temp0_64,
+ &temp1_64,
+ &temp2_64,
size) == -ENOMEM) {
return 0;
}
@@ -7149,7 +7149,7 @@ static int s2io_card_up(struct s2io_nic *sp)
int i, ret = 0;
struct config_param *config;
struct mac_info *mac_control;
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
u16 interruptible;
/* Initialize the H/W I/O registers */
@@ -7325,7 +7325,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
struct s2io_nic *sp = ring_data->nic;
- struct net_device *dev = (struct net_device *)ring_data->dev;
+ struct net_device *dev = ring_data->dev;
struct sk_buff *skb = (struct sk_buff *)
((unsigned long)rxdp->Host_Control);
int ring_no = ring_data->ring_no;
@@ -7508,7 +7508,7 @@ aggregate:
static void s2io_link(struct s2io_nic *sp, int link)
{
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
if (link != sp->last_link_state) {
@@ -8280,7 +8280,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
return -1;
}
- *ip = (struct iphdr *)((u8 *)buffer + ip_off);
+ *ip = (struct iphdr *)(buffer + ip_off);
ip_len = (u8)((*ip)->ihl);
ip_len <<= 2;
*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 98e2c10..32d0682 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2346,7 +2346,7 @@ void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
for (i = 0; i < nreq; i++)
vxge_os_dma_malloc_async(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}
@@ -2428,13 +2428,13 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
break;
pci_unmap_single(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
((struct __vxge_hw_blockpool_entry *)p)->length,
PCI_DMA_BIDIRECTIONAL);
vxge_os_dma_free(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->memblock,
&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
@@ -4059,7 +4059,7 @@ __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
- vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
+ vpath = &hldev->virtual_paths[vp_id];
if (vpath->ringh) {
status = __vxge_hw_ring_reset(vpath->ringh);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 5046a64..9e0c1ee 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1922,7 +1922,7 @@ realloc:
/* misaligned, free current one and try allocating
* size + VXGE_CACHE_LINE_SIZE memory
*/
- kfree((void *) vaddr);
+ kfree(vaddr);
size += VXGE_CACHE_LINE_SIZE;
realloc_flag = 1;
goto realloc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 51387c3..4e20c5f 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1134,7 +1134,7 @@ static void vxge_set_multicast(struct net_device *dev)
"%s:%d", __func__, __LINE__);
vdev = netdev_priv(dev);
- hldev = (struct __vxge_hw_device *)vdev->devh;
+ hldev = vdev->devh;
if (unlikely(!is_vxge_card_up(vdev)))
return;
@@ -3687,7 +3687,8 @@ static int __devinit vxge_config_vpaths(
return 0;
if (!driver_config->g_no_cpus)
- driver_config->g_no_cpus = num_online_cpus();
+ driver_config->g_no_cpus =
+ netif_get_num_default_rss_queues();
driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
if (!driver_config->vpath_per_dev)
@@ -3989,16 +3990,16 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
continue;
vxge_debug_ll_config(VXGE_TRACE,
"%s: MTU size - %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].mtu);
vxge_debug_init(VXGE_TRACE,
"%s: VLAN tag stripping %s", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].rpa_strip_vlan_tag
? "Enabled" : "Disabled");
vxge_debug_ll_config(VXGE_TRACE,
"%s: Max frags : %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].fifo.max_frags);
break;
}
@@ -4260,9 +4261,7 @@ static int vxge_probe_fw_update(struct vxgedev *vdev)
if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
VXGE_FW_VER(maj, min, 0)) {
vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
- " be used with this driver.\n"
- "Please get the latest version from "
- "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+ " be used with this driver.",
VXGE_DRIVER_NAME, maj, min, bld);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 35f3e75..36ca40f 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -430,8 +430,7 @@ void vxge_initialize_ethtool_ops(struct net_device *ndev);
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
-/**
- * #define VXGE_DEBUG_INIT: debug for initialization functions
+/* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
* #define VXGE_DEBUG_RX : debug receive related functions
* #define VXGE_DEBUG_MEM : debug memory module
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 5954fa2..99749bd 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -533,8 +533,7 @@ __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
/* notify driver */
if (hldev->uld_callbacks->crit_err)
- hldev->uld_callbacks->crit_err(
- (struct __vxge_hw_device *)hldev,
+ hldev->uld_callbacks->crit_err(hldev,
type, vp_id);
out:
@@ -1322,7 +1321,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
/* check whether it is not the end */
if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
- vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
+ vxge_assert((rxdp)->host_control !=
0);
++ring->cmpl_cnt;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 928913c..8b7c512 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3218,7 +3218,7 @@ static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
}
/**
- * nv_update_linkspeed: Setup the MAC according to the link partner
+ * nv_update_linkspeed - Setup the MAC according to the link partner
* @dev: Network device to be configured
*
* The function queries the PHY and checks if there is a link partner.
@@ -3552,8 +3552,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
return IRQ_HANDLED;
}
-/**
- * All _optimized functions are used to help increase performance
+/* All _optimized functions are used to help increase performance
* (reduce CPU and increase throughput). They use descriptor version 3,
* compiler directives, and reduce memory accesses.
*/
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666f..e7d2496 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -52,7 +52,6 @@
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"
-#define PHYDEF_ADDR 0x00
#define ENET_MAXF_SIZE 1536
#define ENET_RX_DESC 48
@@ -416,9 +415,6 @@ static bool use_iram_for_net(struct device *dev)
#define TXDESC_CONTROL_LAST (1 << 30)
#define TXDESC_CONTROL_INT (1 << 31)
-static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
- struct net_device *ndev);
-
/*
* Structure of a TX/RX descriptors and RX status
*/
@@ -440,7 +436,7 @@ struct netdata_local {
spinlock_t lock;
void __iomem *net_base;
u32 msg_enable;
- struct sk_buff *skb[ENET_TX_DESC];
+ unsigned int skblen[ENET_TX_DESC];
unsigned int last_tx_idx;
unsigned int num_used_tx_buffs;
struct mii_bus *mii_bus;
@@ -903,12 +899,11 @@ err_out:
static void __lpc_handle_xmit(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct sk_buff *skb;
u32 txcidx, *ptxstat, txstat;
txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
while (pldat->last_tx_idx != txcidx) {
- skb = pldat->skb[pldat->last_tx_idx];
+ unsigned int skblen = pldat->skblen[pldat->last_tx_idx];
/* A buffer is available, get buffer status */
ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
@@ -945,17 +940,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
} else {
/* Update stats */
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
-
- /* Free buffer */
- dev_kfree_skb_irq(skb);
+ ndev->stats.tx_bytes += skblen;
}
txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
}
- if (netif_queue_stopped(ndev))
- netif_wake_queue(ndev);
+ if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ }
}
static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1132,7 +1126,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
/* Save the buffer and increment the buffer counter */
- pldat->skb[txidx] = skb;
+ pldat->skblen[txidx] = len;
pldat->num_used_tx_buffs++;
/* Start transmit */
@@ -1147,6 +1141,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irq(&pldat->lock);
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1320,6 +1315,7 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_set_rx_mode = lpc_eth_set_multicast_list,
.ndo_do_ioctl = lpc_eth_ioctl,
.ndo_set_mac_address = lpc_set_mac_address,
+ .ndo_change_mtu = eth_change_mtu,
};
static int lpc_eth_drv_probe(struct platform_device *pdev)
@@ -1441,7 +1437,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
res->start);
netdev_dbg(ndev, "IO address size :%d\n",
res->end - res->start + 1);
- netdev_err(ndev, "IO address (mapped) :0x%p\n",
+ netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
pldat->net_base);
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index e48f084..5ae03e8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -60,7 +60,7 @@ static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
/**
* pch_gbe_plat_init_hw - Initialize hardware
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed-EBUSY
*/
@@ -108,7 +108,7 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_setup_init_funcs - Initializes function pointers
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
@@ -137,7 +137,7 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_init_hw - Initialize hardware
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
@@ -155,7 +155,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
* @hw: Pointer to the HW structure
* @offset: The register to read
* @data: The buffer to store the 16-bit read.
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -172,7 +172,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
* @hw: Pointer to the HW structure
* @offset: The register to read
* @data: The value to write.
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -211,7 +211,7 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_read_mac_addr - Reads MAC address
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index ac4e72d..9dbf38c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -77,7 +77,7 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
* pch_gbe_get_settings - Get device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -100,7 +100,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
* pch_gbe_set_settings - Set device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -220,7 +220,7 @@ static void pch_gbe_get_wol(struct net_device *netdev,
* pch_gbe_set_wol - Turn Wake-on-Lan on or off
* @netdev: Network interface device structure
* @wol: Pointer of Wake-on-LAN information structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -248,7 +248,7 @@ static int pch_gbe_set_wol(struct net_device *netdev,
/**
* pch_gbe_nway_reset - Restart autonegotiation
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -398,7 +398,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev,
* pch_gbe_set_pauseparam - Set pause parameters
* @netdev: Network interface device structure
* @pause: Pause parameters structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3787c64..b100656 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -301,7 +301,7 @@ inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
/**
* pch_gbe_mac_read_mac_addr - Read MAC address
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
*/
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
@@ -483,7 +483,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
/**
* pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -639,7 +639,7 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
/**
* pch_gbe_alloc_queues - Allocate memory for all rings
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -670,7 +670,7 @@ static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_init_phy - Initialize PHY
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -720,7 +720,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
* @netdev: Network interface device structure
* @addr: Phy ID
* @reg: Access location
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1364,7 +1364,7 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
* pch_gbe_intr - Interrupt Handler
* @irq: Interrupt number
* @data: Pointer to a network interface device structure
- * Returns
+ * Returns:
* - IRQ_HANDLED: Our interrupt
* - IRQ_NONE: Not our interrupt
*/
@@ -1566,7 +1566,7 @@ static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
* pch_gbe_clean_tx - Reclaim resources after transmit completes
* @adapter: Board private structure
* @tx_ring: Tx descriptor ring
- * Returns
+ * Returns:
* true: Cleaned the descriptor
* false: Not cleaned the descriptor
*/
@@ -1660,7 +1660,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
* @rx_ring: Rx descriptor ring
* @work_done: Completed count
* @work_to_do: Request count
- * Returns
+ * Returns:
* true: Cleaned the descriptor
* false: Not cleaned the descriptor
*/
@@ -1775,7 +1775,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
* pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
* @adapter: Board private structure
* @tx_ring: Tx descriptor ring (for a specific queue) to setup
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1822,7 +1822,7 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
* pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
* @adapter: Board private structure
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1899,7 +1899,7 @@ void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
/**
* pch_gbe_request_irq - Allocate an interrupt line
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1932,7 +1932,7 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_up - Up GbE network device
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2018,7 +2018,7 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2057,7 +2057,7 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_open - Called when a network interface is made active
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2097,7 +2097,7 @@ err_setup_tx:
/**
* pch_gbe_stop - Disables a network interface
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successfully
*/
static int pch_gbe_stop(struct net_device *netdev)
@@ -2117,7 +2117,7 @@ static int pch_gbe_stop(struct net_device *netdev)
* pch_gbe_xmit_frame - Packet transmitting start
* @skb: Socket buffer structure
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* - NETDEV_TX_OK: Normal end
* - NETDEV_TX_BUSY: Error end
*/
@@ -2225,7 +2225,7 @@ static void pch_gbe_set_multi(struct net_device *netdev)
* pch_gbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: Network interface device structure
* @addr: Pointer to an address structure
- * Returns
+ * Returns:
* 0: Successfully
* -EADDRNOTAVAIL: Failed
*/
@@ -2256,7 +2256,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
* pch_gbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: Network interface device structure
* @new_mtu: New value for maximum frame size
- * Returns
+ * Returns:
* 0: Successfully
* -EINVAL: Failed
*/
@@ -2309,7 +2309,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
* pch_gbe_set_features - Reset device after features changed
* @netdev: Network interface device structure
* @features: New features
- * Returns
+ * Returns:
* 0: HW state updated successfully
*/
static int pch_gbe_set_features(struct net_device *netdev,
@@ -2334,7 +2334,7 @@ static int pch_gbe_set_features(struct net_device *netdev,
* @netdev: Network interface device structure
* @ifr: Pointer to ifr structure
* @cmd: Control command
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2369,7 +2369,7 @@ static void pch_gbe_tx_timeout(struct net_device *netdev)
* pch_gbe_napi_poll - NAPI receive and transfer polling callback
* @napi: Pointer of polling device struct
* @budget: The maximum number of a packet
- * Returns
+ * Returns:
* false: Exit the polling mode
* true: Continue the polling mode
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 29e23be..8653c3b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -139,7 +139,7 @@ MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
/**
* pch_gbe_option - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -220,7 +220,7 @@ static const struct pch_gbe_opt_list fc_list[] = {
* @value: value
* @opt: option
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 37ccbe5..eb3dfdb 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 79
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.79"
+#define _NETXEN_NIC_LINUX_SUBVERSION 80
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.80"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 3973040..10468e7 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -489,7 +489,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
int port = adapter->physical_port;
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -511,7 +511,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
break;
}
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
@@ -534,7 +534,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
int port = adapter->physical_port;
/* read mode */
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -577,7 +577,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
}
NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
return -EIO;
val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
if (port == 0) {
@@ -826,7 +826,12 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
dump->len = mdump->md_dump_size;
else
dump->len = 0;
- dump->flag = mdump->md_capture_mask;
+
+ if (!mdump->md_enabled)
+ dump->flag = ETH_FW_DUMP_DISABLE;
+ else
+ dump->flag = mdump->md_capture_mask;
+
dump->version = adapter->fw_version;
return 0;
}
@@ -840,8 +845,10 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
switch (val->flag) {
case NX_FORCE_FW_DUMP_KEY:
- if (!mdump->md_enabled)
- mdump->md_enabled = 1;
+ if (!mdump->md_enabled) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ return 0;
+ }
if (adapter->fw_mdump_rdy) {
netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
return 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index de96a94..946160f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -365,7 +365,7 @@ static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
return 0;
- if (port > NETXEN_NIU_MAX_XG_PORTS)
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
mac_cfg = 0;
@@ -392,7 +392,7 @@ static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
u32 port = adapter->physical_port;
u16 board_type = adapter->ahw.board_type;
- if (port > NETXEN_NIU_MAX_XG_PORTS)
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
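The netxen checks above are off-by-one fixes: the port index addresses a table of NETXEN_NIU_MAX_*_PORTS entries, so the reject test must use >= rather than >. A tiny standalone illustration, with hypothetical names:

/* Illustrative sketch (not part of the patch): for a table with MAX_PORTS
 * entries, valid indices are 0 .. MAX_PORTS-1, so ">= MAX_PORTS" is the
 * correct reject condition; ">" would let port == MAX_PORTS through. */
#include <stdio.h>

#define MAX_PORTS 4

static int port_cfg[MAX_PORTS];

static int set_port(int port, int val)
{
	if (port < 0 || port >= MAX_PORTS)
		return -1;               /* out of range */
	port_cfg[port] = val;
	return 0;
}

int main(void)
{
	printf("%d\n", set_port(MAX_PORTS, 1)); /* correctly rejected: prints -1 */
	return 0;
}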
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 8694124..bc165f4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1437,8 +1437,6 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
netdev->name, cable_len);
}
- netxen_advert_link_change(adapter, link_status);
-
/* update link parameters */
if (duplex == LINKEVENT_FULL_DUPLEX)
adapter->link_duplex = DUPLEX_FULL;
@@ -1447,6 +1445,8 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
adapter->module_type = module;
adapter->link_autoneg = autoneg;
adapter->link_speed = link_speed;
+
+ netxen_advert_link_change(adapter, link_status);
}
static void
@@ -1532,8 +1532,6 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
} else
skb->ip_summed = CHECKSUM_NONE;
- skb->dev = adapter->netdev;
-
buffer->skb = NULL;
no_skb:
buffer->state = NETXEN_BUFFER_FREE;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 8680a5d..eaa1db9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 28
-#define QLCNIC_LINUX_VERSIONID "5.0.28"
+#define _QLCNIC_LINUX_SUBVERSION 29
+#define QLCNIC_LINUX_VERSIONID "5.0.29"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -258,6 +258,8 @@ struct rcv_desc {
(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data) \
((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1) \
+ ((sts_data1 >> 32) & 0x0FFFF)
struct status_desc {
@@ -610,7 +612,11 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_INVALID_ARGS 6
#define QLCNIC_RCODE_NOT_SUPPORTED 9
+#define QLCNIC_RCODE_NOT_PERMITTED 10
+#define QLCNIC_RCODE_NOT_IMPL 15
+#define QLCNIC_RCODE_INVALID 16
#define QLCNIC_RCODE_TIMEOUT 17
#define QLCNIC_DESTROY_CTX_RESET 0
@@ -623,6 +629,7 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
#define QLCNIC_CAP0_VALIDOFF (1 << 11)
+#define QLCNIC_CAP0_LRO_MSS (1 << 21)
/*
* Context state
@@ -829,6 +836,9 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
+#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
+
+#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -918,6 +928,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_NEED_FLR 0x1000
#define QLCNIC_FW_RESET_OWNER 0x2000
#define QLCNIC_FW_HANG 0x4000
+#define QLCNIC_FW_LRO_MSS_CAP 0x8000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 8db8524..b8ead69 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -53,12 +53,39 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
rsp = qlcnic_poll_rsp(adapter);
if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
- dev_err(&pdev->dev, "card response timeout.\n");
+ dev_err(&pdev->dev, "CDRP response timeout.\n");
cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- dev_err(&pdev->dev, "failed card response code:0x%x\n",
+ switch (cmd->rsp.cmd) {
+ case QLCNIC_RCODE_INVALID_ARGS:
+ dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_NOT_SUPPORTED:
+ case QLCNIC_RCODE_NOT_IMPL:
+ dev_err(&pdev->dev,
+ "CDRP command not supported: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_NOT_PERMITTED:
+ dev_err(&pdev->dev,
+ "CDRP requested action not permitted: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_INVALID:
+ dev_err(&pdev->dev,
+ "CDRP invalid or unknown cmd received: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_TIMEOUT:
+ dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ default:
+ dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
+ cmd->rsp.cmd);
+ }
} else if (rsp == QLCNIC_CDRP_RSP_OK) {
cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
if (cmd->rsp.arg2)
@@ -237,6 +264,9 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLCNIC_CAP0_LRO_MSS;
+
prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
msix_handler);
prq->txrx_sds_binding = nsds_rings - 1;
@@ -954,9 +984,6 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
- } else {
- dev_info(&adapter->pdev->dev,
- "%s: Get mac stats failed =%d.\n", __func__, err);
}
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 6ced319..28a6b28 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -588,6 +588,7 @@ enum {
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
+#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 799fd40..0bcda9c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1488,8 +1488,6 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb_checksum_none_assert(skb);
}
- skb->dev = adapter->netdev;
-
buffer->skb = NULL;
return skb;
@@ -1653,6 +1651,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
length = skb->len;
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+
if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid);
netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 46e77a2..212c121 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -479,7 +479,7 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pfn = pci_info[i].id;
- if (pfn > QLCNIC_MAX_PCI_FUNC) {
+ if (pfn >= QLCNIC_MAX_PCI_FUNC) {
ret = QL_STATUS_INVALID_PARAM;
goto err_eswitch;
}
@@ -1136,6 +1136,8 @@ static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int ring;
+ u32 capab2;
+
struct qlcnic_host_rds_ring *rds_ring;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1146,6 +1148,12 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
@@ -1215,6 +1223,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_napi_disable(adapter);
qlcnic_fw_destroy_ctx(adapter);
+ adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
qlcnic_reset_rx_buffers_list(adapter);
qlcnic_release_tx_buffers(adapter);
@@ -2024,6 +2033,7 @@ qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
vh = (struct vlan_ethhdr *)skb->data;
flags = FLAGS_VLAN_TAGGED;
vlan_tci = vh->h_vlan_TCI;
+ protocol = ntohs(vh->h_vlan_encapsulated_proto);
} else if (vlan_tx_tag_present(skb)) {
flags = FLAGS_VLAN_OOB;
vlan_tci = vlan_tx_tag_get(skb);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 5a639df..a131d7b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,13 +18,15 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.30.00.00-01"
+#define DRV_VERSION "v1.00.00.31"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
#define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID_8012 0x8012
#define QLGE_DEVICE_ID_8000 0x8000
+#define QLGE_MEZZ_SSYS_ID_068 0x0068
+#define QLGE_MEZZ_SSYS_ID_180 0x0180
#define MAX_CPUS 8
#define MAX_TX_RINGS MAX_CPUS
#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
@@ -1397,7 +1399,6 @@ struct tx_ring {
struct tx_ring_desc *q; /* descriptor list for the queue */
spinlock_t lock;
atomic_t tx_count; /* counts down for every outstanding IO */
- atomic_t queue_stopped; /* Turns queue off when full. */
struct delayed_work tx_work;
struct ql_adapter *qdev;
u64 tx_packets;
@@ -1535,6 +1536,14 @@ struct nic_stats {
u64 rx_1024_to_1518_pkts;
u64 rx_1519_to_max_pkts;
u64 rx_len_err_pkts;
+ /* Receive Mac Err stats */
+ u64 rx_code_err;
+ u64 rx_oversize_err;
+ u64 rx_undersize_err;
+ u64 rx_preamble_err;
+ u64 rx_frame_len_err;
+ u64 rx_crc_err;
+ u64 rx_err_count;
/*
* These stats come from offset 500h to 5C8h
* in the XGMAC register.
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8e2c2a7..3d4462b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -35,10 +35,152 @@
#include "qlge.h"
+struct ql_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
+#define QL_OFF(m) offsetof(struct ql_adapter, m)
+
+static const struct ql_stats ql_gstrings_stats[] = {
+ {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
+ {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
+ {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
+ QL_OFF(nic_stats.tx_mcast_pkts)},
+ {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
+ QL_OFF(nic_stats.tx_bcast_pkts)},
+ {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
+ QL_OFF(nic_stats.tx_ucast_pkts)},
+ {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
+ QL_OFF(nic_stats.tx_ctl_pkts)},
+ {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
+ QL_OFF(nic_stats.tx_pause_pkts)},
+ {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
+ QL_OFF(nic_stats.tx_64_pkt)},
+ {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
+ QL_OFF(nic_stats.tx_65_to_127_pkt)},
+ {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
+ QL_OFF(nic_stats.tx_128_to_255_pkt)},
+ {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
+ QL_OFF(nic_stats.tx_256_511_pkt)},
+ {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
+ QL_OFF(nic_stats.tx_512_to_1023_pkt)},
+ {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
+ QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
+ {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
+ QL_OFF(nic_stats.tx_1519_to_max_pkt)},
+ {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
+ QL_OFF(nic_stats.tx_undersize_pkt)},
+ {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
+ QL_OFF(nic_stats.tx_oversize_pkt)},
+ {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
+ {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
+ QL_OFF(nic_stats.rx_bytes_ok)},
+ {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
+ {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
+ QL_OFF(nic_stats.rx_pkts_ok)},
+ {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
+ QL_OFF(nic_stats.rx_bcast_pkts)},
+ {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
+ QL_OFF(nic_stats.rx_mcast_pkts)},
+ {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
+ QL_OFF(nic_stats.rx_ucast_pkts)},
+ {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
+ QL_OFF(nic_stats.rx_undersize_pkts)},
+ {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
+ QL_OFF(nic_stats.rx_oversize_pkts)},
+ {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
+ QL_OFF(nic_stats.rx_jabber_pkts)},
+ {"rx_undersize_fcerr_pkts",
+ QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
+ QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
+ {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
+ QL_OFF(nic_stats.rx_drop_events)},
+ {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
+ QL_OFF(nic_stats.rx_fcerr_pkts)},
+ {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
+ QL_OFF(nic_stats.rx_align_err)},
+ {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
+ QL_OFF(nic_stats.rx_symbol_err)},
+ {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
+ QL_OFF(nic_stats.rx_mac_err)},
+ {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
+ QL_OFF(nic_stats.rx_ctl_pkts)},
+ {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
+ QL_OFF(nic_stats.rx_pause_pkts)},
+ {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
+ QL_OFF(nic_stats.rx_64_pkts)},
+ {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
+ QL_OFF(nic_stats.rx_65_to_127_pkts)},
+ {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
+ QL_OFF(nic_stats.rx_128_255_pkts)},
+ {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
+ QL_OFF(nic_stats.rx_256_511_pkts)},
+ {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
+ QL_OFF(nic_stats.rx_512_to_1023_pkts)},
+ {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
+ QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
+ {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
+ QL_OFF(nic_stats.rx_1519_to_max_pkts)},
+ {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
+ QL_OFF(nic_stats.rx_len_err_pkts)},
+ {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
+ QL_OFF(nic_stats.rx_code_err)},
+ {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
+ QL_OFF(nic_stats.rx_oversize_err)},
+ {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
+ QL_OFF(nic_stats.rx_undersize_err)},
+ {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
+ QL_OFF(nic_stats.rx_preamble_err)},
+ {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
+ QL_OFF(nic_stats.rx_frame_len_err)},
+ {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
+ QL_OFF(nic_stats.rx_crc_err)},
+ {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
+ QL_OFF(nic_stats.rx_err_count)},
+ {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
+ {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
+ {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
+ {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
+ {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
+ {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
+ {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
+ {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
+ {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
+ {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
+ {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
+ {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
+ {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
+ {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
+ {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
+ {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
+ {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
+ QL_OFF(nic_stats.rx_nic_fifo_drop)},
+};
+
static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
"Loopback test (offline)"
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
@@ -183,73 +325,19 @@ quit:
QL_DUMP_STAT(qdev);
}
-static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
- {"tx_pkts"},
- {"tx_bytes"},
- {"tx_mcast_pkts"},
- {"tx_bcast_pkts"},
- {"tx_ucast_pkts"},
- {"tx_ctl_pkts"},
- {"tx_pause_pkts"},
- {"tx_64_pkts"},
- {"tx_65_to_127_pkts"},
- {"tx_128_to_255_pkts"},
- {"tx_256_511_pkts"},
- {"tx_512_to_1023_pkts"},
- {"tx_1024_to_1518_pkts"},
- {"tx_1519_to_max_pkts"},
- {"tx_undersize_pkts"},
- {"tx_oversize_pkts"},
- {"rx_bytes"},
- {"rx_bytes_ok"},
- {"rx_pkts"},
- {"rx_pkts_ok"},
- {"rx_bcast_pkts"},
- {"rx_mcast_pkts"},
- {"rx_ucast_pkts"},
- {"rx_undersize_pkts"},
- {"rx_oversize_pkts"},
- {"rx_jabber_pkts"},
- {"rx_undersize_fcerr_pkts"},
- {"rx_drop_events"},
- {"rx_fcerr_pkts"},
- {"rx_align_err"},
- {"rx_symbol_err"},
- {"rx_mac_err"},
- {"rx_ctl_pkts"},
- {"rx_pause_pkts"},
- {"rx_64_pkts"},
- {"rx_65_to_127_pkts"},
- {"rx_128_255_pkts"},
- {"rx_256_511_pkts"},
- {"rx_512_to_1023_pkts"},
- {"rx_1024_to_1518_pkts"},
- {"rx_1519_to_max_pkts"},
- {"rx_len_err_pkts"},
- {"tx_cbfc_pause_frames0"},
- {"tx_cbfc_pause_frames1"},
- {"tx_cbfc_pause_frames2"},
- {"tx_cbfc_pause_frames3"},
- {"tx_cbfc_pause_frames4"},
- {"tx_cbfc_pause_frames5"},
- {"tx_cbfc_pause_frames6"},
- {"tx_cbfc_pause_frames7"},
- {"rx_cbfc_pause_frames0"},
- {"rx_cbfc_pause_frames1"},
- {"rx_cbfc_pause_frames2"},
- {"rx_cbfc_pause_frames3"},
- {"rx_cbfc_pause_frames4"},
- {"rx_cbfc_pause_frames5"},
- {"rx_cbfc_pause_frames6"},
- {"rx_cbfc_pause_frames7"},
- {"rx_nic_fifo_drop"},
-};
-
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
+ int index;
switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
case ETH_SS_STATS:
- memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
+ for (index = 0; index < QLGE_STATS_LEN; index++) {
+ memcpy(buf + index * ETH_GSTRING_LEN,
+ ql_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
break;
}
}
@@ -260,7 +348,7 @@ static int ql_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_TEST:
return QLGE_TEST_LEN;
case ETH_SS_STATS:
- return ARRAY_SIZE(ql_stats_str_arr);
+ return QLGE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -271,69 +359,17 @@ ql_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- struct nic_stats *s = &qdev->nic_stats;
+ int index, length;
+ length = QLGE_STATS_LEN;
ql_update_stats(qdev);
- *data++ = s->tx_pkts;
- *data++ = s->tx_bytes;
- *data++ = s->tx_mcast_pkts;
- *data++ = s->tx_bcast_pkts;
- *data++ = s->tx_ucast_pkts;
- *data++ = s->tx_ctl_pkts;
- *data++ = s->tx_pause_pkts;
- *data++ = s->tx_64_pkt;
- *data++ = s->tx_65_to_127_pkt;
- *data++ = s->tx_128_to_255_pkt;
- *data++ = s->tx_256_511_pkt;
- *data++ = s->tx_512_to_1023_pkt;
- *data++ = s->tx_1024_to_1518_pkt;
- *data++ = s->tx_1519_to_max_pkt;
- *data++ = s->tx_undersize_pkt;
- *data++ = s->tx_oversize_pkt;
- *data++ = s->rx_bytes;
- *data++ = s->rx_bytes_ok;
- *data++ = s->rx_pkts;
- *data++ = s->rx_pkts_ok;
- *data++ = s->rx_bcast_pkts;
- *data++ = s->rx_mcast_pkts;
- *data++ = s->rx_ucast_pkts;
- *data++ = s->rx_undersize_pkts;
- *data++ = s->rx_oversize_pkts;
- *data++ = s->rx_jabber_pkts;
- *data++ = s->rx_undersize_fcerr_pkts;
- *data++ = s->rx_drop_events;
- *data++ = s->rx_fcerr_pkts;
- *data++ = s->rx_align_err;
- *data++ = s->rx_symbol_err;
- *data++ = s->rx_mac_err;
- *data++ = s->rx_ctl_pkts;
- *data++ = s->rx_pause_pkts;
- *data++ = s->rx_64_pkts;
- *data++ = s->rx_65_to_127_pkts;
- *data++ = s->rx_128_255_pkts;
- *data++ = s->rx_256_511_pkts;
- *data++ = s->rx_512_to_1023_pkts;
- *data++ = s->rx_1024_to_1518_pkts;
- *data++ = s->rx_1519_to_max_pkts;
- *data++ = s->rx_len_err_pkts;
- *data++ = s->tx_cbfc_pause_frames0;
- *data++ = s->tx_cbfc_pause_frames1;
- *data++ = s->tx_cbfc_pause_frames2;
- *data++ = s->tx_cbfc_pause_frames3;
- *data++ = s->tx_cbfc_pause_frames4;
- *data++ = s->tx_cbfc_pause_frames5;
- *data++ = s->tx_cbfc_pause_frames6;
- *data++ = s->tx_cbfc_pause_frames7;
- *data++ = s->rx_cbfc_pause_frames0;
- *data++ = s->rx_cbfc_pause_frames1;
- *data++ = s->rx_cbfc_pause_frames2;
- *data++ = s->rx_cbfc_pause_frames3;
- *data++ = s->rx_cbfc_pause_frames4;
- *data++ = s->rx_cbfc_pause_frames5;
- *data++ = s->rx_cbfc_pause_frames6;
- *data++ = s->rx_cbfc_pause_frames7;
- *data++ = s->rx_nic_fifo_drop;
+ for (index = 0; index < length; index++) {
+ char *p = (char *)qdev +
+ ql_gstrings_stats[index].stat_offset;
+ *data++ = (ql_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
+ }
}
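The qlge ethtool rework above replaces two parallel arrays (one of strings, one of hand-written *data++ assignments) with a single table of {name, size, offset} entries that both ql_get_strings() and ql_get_ethtool_stats() walk, so the labels and values cannot drift apart. A compact userspace sketch of the same offset-table pattern; the struct and field names here are invented for illustration.

/* Illustrative sketch (not part of the patch) of the offset-table pattern. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_stats {
	uint64_t tx_pkts;
	uint64_t rx_pkts;
	uint32_t rx_crc_err;
};

struct stat_desc {
	const char *name;
	size_t size;
	size_t offset;
};

#define STAT(m) { #m, sizeof(((struct demo_stats *)0)->m), \
		  offsetof(struct demo_stats, m) }

static const struct stat_desc stat_table[] = {
	STAT(tx_pkts),
	STAT(rx_pkts),
	STAT(rx_crc_err),
};

int main(void)
{
	struct demo_stats s = { .tx_pkts = 10, .rx_pkts = 7, .rx_crc_err = 1 };
	size_t i;

	for (i = 0; i < sizeof(stat_table) / sizeof(stat_table[0]); i++) {
		const char *p = (const char *)&s + stat_table[i].offset;
		uint64_t v = stat_table[i].size == sizeof(uint64_t) ?
			     *(const uint64_t *)p : *(const uint32_t *)p;
		printf("%-12s %llu\n", stat_table[i].name,
		       (unsigned long long)v);
	}
	return 0;
}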
static int ql_get_settings(struct net_device *ndev,
@@ -388,30 +424,33 @@ static void ql_get_drvinfo(struct net_device *ndev,
static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- /* What we support. */
- wol->supported = WAKE_MAGIC;
- /* What we've currently got set. */
- wol->wolopts = qdev->wol;
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
+ ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = qdev->wol;
+ }
}
static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
+ ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
+ netif_info(qdev, drv, qdev->ndev,
+ "WOL is only supported for mezz card\n");
+ return -EOPNOTSUPP;
+ }
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
qdev->wol = wol->wolopts;
netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
- if (!qdev->wol) {
- u32 wol = 0;
- status = ql_mb_wol_mode(qdev, wol);
- netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
- status == 0 ? "cleared successfully" : "clear failed",
- wol);
- }
-
return 0;
}
@@ -528,6 +567,8 @@ static void ql_self_test(struct net_device *ndev,
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
+
if (netif_running(ndev)) {
set_bit(QL_SELFTEST, &qdev->flags);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 09d8d33..3769f57 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1433,6 +1433,36 @@ map_error:
return NETDEV_TX_BUSY;
}
+/* Categorizing receive firmware frame errors */
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+{
+ struct nic_stats *stats = &qdev->nic_stats;
+
+ stats->rx_err_count++;
+
+ switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
+ case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
+ stats->rx_code_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
+ stats->rx_oversize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
+ stats->rx_undersize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
+ stats->rx_preamble_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
+ stats->rx_frame_len_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_CRC:
+ stats->rx_crc_err++;
+ break;
+ default:
+ break;
+ }
+}
+
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
@@ -1499,15 +1529,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
addr = lbq_desc->p.pg_chunk.va;
prefetch(addr);
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- rx_ring->rx_errors++;
- goto err_out;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1546,7 +1567,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct iphdr *iph =
(struct iphdr *) ((u8 *)addr + ETH_HLEN);
if (!(iph->frag_off &
- cpu_to_be16(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1593,15 +1614,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
memcpy(skb_put(new_skb, length), skb->data, length);
skb = new_skb;
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- dev_kfree_skb_any(skb);
- rx_ring->rx_errors++;
- return;
- }
-
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
@@ -1619,7 +1631,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
}
prefetch(skb->data);
- skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%s Multicast.\n",
@@ -1654,7 +1665,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
- ntohs(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1908,15 +1919,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
return;
}
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- dev_kfree_skb_any(skb);
- rx_ring->rx_errors++;
- return;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1934,7 +1936,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
}
prefetch(skb->data);
- skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
@@ -1968,7 +1969,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
- ntohs(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
@@ -1999,6 +2000,12 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
+ return (unsigned long)length;
+ }
+
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
@@ -2173,8 +2180,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_write_cq_idx(rx_ring);
tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
- if (atomic_read(&tx_ring->queue_stopped) &&
- (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
* The queue got stopped because the tx_ring was full.
* Wake it up, because it's now at least 25% empty.
@@ -2558,10 +2564,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
netif_info(qdev, tx_queued, qdev->ndev,
- "%s: shutting down tx queue %d du to lack of resources.\n",
+ "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
__func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
- atomic_inc(&tx_ring->queue_stopped);
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -2612,6 +2617,16 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
+
+ if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+ netif_stop_subqueue(ndev, tx_ring->wq_id);
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ /*
+ * The queue got stopped because the tx_ring was full.
+ * Wake it up, because it's now at least 25% empty.
+ */
+ netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
+ }
return NETDEV_TX_OK;
}
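The qlge_send() change above stops the subqueue as soon as fewer than two descriptors remain and wakes it again only once at least a quarter of the ring is free, matching the completion path and avoiding rapid stop/wake flapping. A simplified, self-contained model of that hysteresis follows; the names are illustrative.

/* Illustrative sketch (not part of the patch): stop/wake hysteresis on a
 * transmit ring -- stop when nearly full, wake once 25% of slots are free. */
#include <stdio.h>
#include <stdbool.h>

#define RING_LEN 64

static int  free_slots = RING_LEN;
static bool queue_stopped;

static void tx_one(void)
{
	free_slots--;
	if (free_slots < 2) {            /* nearly full: stop accepting packets */
		queue_stopped = true;
		printf("queue stopped\n");
	}
}

static void tx_complete(int n)
{
	free_slots += n;
	/* wake only when at least a quarter of the ring is free again */
	if (queue_stopped && free_slots > RING_LEN / 4) {
		queue_stopped = false;
		printf("queue woken\n");
	}
}

int main(void)
{
	int i;

	for (i = 0; i < RING_LEN - 1; i++)
		tx_one();
	tx_complete(RING_LEN / 4 + 1);
	return 0;
}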
@@ -2680,7 +2695,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
tx_ring_desc++;
}
atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
- atomic_set(&tx_ring->queue_stopped, 0);
}
static void ql_free_tx_resources(struct ql_adapter *qdev,
@@ -2703,10 +2717,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
&tx_ring->wq_base_dma);
if ((tx_ring->wq_base == NULL) ||
- tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
- netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
- return -ENOMEM;
- }
+ tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
+ goto pci_alloc_err;
+
tx_ring->q =
kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
if (tx_ring->q == NULL)
@@ -2716,6 +2729,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
err:
pci_free_consistent(qdev->pdev, tx_ring->wq_size,
tx_ring->wq_base, tx_ring->wq_base_dma);
+ tx_ring->wq_base = NULL;
+pci_alloc_err:
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
return -ENOMEM;
}
@@ -4649,7 +4665,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
int err = 0;
ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
- min(MAX_CPUS, (int)num_online_cpus()));
+ min(MAX_CPUS, netif_get_num_default_rss_queues()));
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index d1827e8..557a265 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1256,7 +1256,6 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
netif_napi_del(&lp->napi);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, lp->base);
pci_release_regions(pdev);
free_netdev(dev);
@@ -1278,17 +1277,4 @@ static struct pci_driver r6040_driver = {
.remove = __devexit_p(r6040_remove_one),
};
-
-static int __init r6040_init(void)
-{
- return pci_register_driver(&r6040_driver);
-}
-
-
-static void __exit r6040_cleanup(void)
-{
- pci_unregister_driver(&r6040_driver);
-}
-
-module_init(r6040_init);
-module_exit(r6040_cleanup);
+module_pci_driver(r6040_driver);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 9757ce3..c29c5fb 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -46,6 +46,8 @@
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
+#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
+#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
#ifdef RTL8169_DEBUG
#define assert(expr) \
@@ -141,6 +143,9 @@ enum mac_version {
RTL_GIGA_MAC_VER_36,
RTL_GIGA_MAC_VER_37,
RTL_GIGA_MAC_VER_38,
+ RTL_GIGA_MAC_VER_39,
+ RTL_GIGA_MAC_VER_40,
+ RTL_GIGA_MAC_VER_41,
RTL_GIGA_MAC_NONE = 0xff,
};
@@ -259,6 +264,14 @@ static const struct {
[RTL_GIGA_MAC_VER_38] =
_R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_39] =
+ _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
+ JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_40] =
+ _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_41] =
+ _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
};
#undef _R
@@ -389,8 +402,12 @@ enum rtl8168_8101_registers {
TWSI = 0xd2,
MCU = 0xd3,
#define NOW_IS_OOB (1 << 7)
+#define TX_EMPTY (1 << 5)
+#define RX_EMPTY (1 << 4)
+#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
#define EN_NDP (1 << 3)
#define EN_OOB_RESET (1 << 2)
+#define LINK_LIST_RDY (1 << 1)
EFUSEAR = 0xdc,
#define EFUSEAR_FLAG 0x80000000
#define EFUSEAR_WRITE_CMD 0x80000000
@@ -416,6 +433,7 @@ enum rtl8168_registers {
#define ERIAR_MASK_SHIFT 12
#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
EPHY_RXER_NUM = 0x7c,
OCPDR = 0xb0, /* OCP GPHY access */
@@ -428,10 +446,14 @@ enum rtl8168_registers {
#define OCPAR_FLAG 0x80000000
#define OCPAR_GPHY_WRITE_CMD 0x8000f060
#define OCPAR_GPHY_READ_CMD 0x0000f060
+ GPHY_OCP = 0xb8,
RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
MISC = 0xf0, /* 8168e only. */
#define TXPLA_RST (1 << 29)
+#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
#define PWM_EN (1 << 22)
+#define RXDV_GATED_EN (1 << 19)
+#define EARLY_TALLY_EN (1 << 16)
};
enum rtl_register_content {
@@ -721,8 +743,8 @@ struct rtl8169_private {
u16 event_slow;
struct mdio_ops {
- void (*write)(void __iomem *, int, int);
- int (*read)(void __iomem *, int);
+ void (*write)(struct rtl8169_private *, int, int);
+ int (*read)(struct rtl8169_private *, int);
} mdio_ops;
struct pll_power_ops {
@@ -736,8 +758,8 @@ struct rtl8169_private {
} jumbo_ops;
struct csi_ops {
- void (*write)(void __iomem *, int, int);
- u32 (*read)(void __iomem *, int);
+ void (*write)(struct rtl8169_private *, int, int);
+ u32 (*read)(struct rtl8169_private *, int);
} csi_ops;
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
@@ -774,6 +796,8 @@ struct rtl8169_private {
} phy_action;
} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
+
+ u32 ocp_base;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -794,6 +818,8 @@ MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
+MODULE_FIRMWARE(FIRMWARE_8106E_1);
+MODULE_FIRMWARE(FIRMWARE_8168G_1);
static void rtl_lock_work(struct rtl8169_private *tp)
{
@@ -818,47 +844,113 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
}
}
+struct rtl_cond {
+ bool (*check)(struct rtl8169_private *);
+ const char *msg;
+};
+
+static void rtl_udelay(unsigned int d)
+{
+ udelay(d);
+}
+
+static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
+ void (*delay)(unsigned int), unsigned int d, int n,
+ bool high)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ delay(d);
+ if (c->check(tp) == high)
+ return true;
+ }
+ netif_err(tp, drv, tp->dev, c->msg);
+ return false;
+}
+
+static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
+}
+
+static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
+}
+
+static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, msleep, d, n, true);
+}
+
+static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, msleep, d, n, false);
+}
+
+#define DECLARE_RTL_COND(name) \
+static bool name ## _check(struct rtl8169_private *); \
+ \
+static const struct rtl_cond name = { \
+ .check = name ## _check, \
+ .msg = #name \
+}; \
+ \
+static bool name ## _check(struct rtl8169_private *tp)
+
+DECLARE_RTL_COND(rtl_ocpar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(OCPAR) & OCPAR_FLAG;
+}
+
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- for (i = 0; i < 20; i++) {
- udelay(100);
- if (RTL_R32(OCPAR) & OCPAR_FLAG)
- break;
- }
- return RTL_R32(OCPDR);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
+ RTL_R32(OCPDR) : ~0;
}
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W32(OCPDR, data);
RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- for (i = 0; i < 20; i++) {
- udelay(100);
- if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
- break;
- }
+
+ rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
+}
+
+DECLARE_RTL_COND(rtl_eriar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(ERIAR) & ERIAR_FLAG;
}
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W8(ERIDR, cmd);
RTL_W32(ERIAR, 0x800010e8);
msleep(2);
- for (i = 0; i < 5; i++) {
- udelay(100);
- if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
- break;
- }
+
+ if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
+ return;
ocp_write(tp, 0x1, 0x30, 0x00000001);
}
@@ -872,36 +964,27 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
}
-static void rtl8168_driver_start(struct rtl8169_private *tp)
+DECLARE_RTL_COND(rtl_ocp_read_cond)
{
u16 reg;
- int i;
-
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
reg = rtl8168_get_ocp_reg(tp);
- for (i = 0; i < 10; i++) {
- msleep(10);
- if (ocp_read(tp, 0x0f, reg) & 0x00000800)
- break;
- }
+ return ocp_read(tp, 0x0f, reg) & 0x00000800;
}
-static void rtl8168_driver_stop(struct rtl8169_private *tp)
+static void rtl8168_driver_start(struct rtl8169_private *tp)
{
- u16 reg;
- int i;
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+ rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
+}
- reg = rtl8168_get_ocp_reg(tp);
+static void rtl8168_driver_stop(struct rtl8169_private *tp)
+{
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- for (i = 0; i < 10; i++) {
- msleep(10);
- if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
- break;
- }
+ rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
static int r8168dp_check_dash(struct rtl8169_private *tp)
@@ -911,21 +994,124 @@ static int r8168dp_check_dash(struct rtl8169_private *tp)
return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
}
-static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
{
- int i;
+ if (reg & 0xffff0001) {
+ netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
+ return true;
+ }
+ return false;
+}
- RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
+DECLARE_RTL_COND(rtl_ocp_gphy_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
- for (i = 20; i > 0; i--) {
- /*
- * Check if the RTL8169 has completed writing to the specified
- * MII register.
- */
- if (!(RTL_R32(PHYAR) & 0x80000000))
- break;
- udelay(25);
+ return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
+}
+
+static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return;
+
+ RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
+
+ rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
+}
+
+static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return 0;
+
+ RTL_W32(GPHY_OCP, reg << 15);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
+ (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
+}
+
+static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
+{
+ int val;
+
+ val = r8168_phy_ocp_read(tp, reg);
+ r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
+}
+
+DECLARE_RTL_COND(rtl_ocpdr_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(OCPDR) & OCPAR_FLAG;
+}
+
+static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return;
+
+ RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
+
+ rtl_udelay_loop_wait_low(tp, &rtl_ocpdr_cond, 25, 10);
+}
+
+static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return 0;
+
+ RTL_W32(OCPDR, reg << 15);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocpdr_cond, 25, 10) ?
+ RTL_R32(OCPDR) : ~0;
+}
+
+#define OCP_STD_PHY_BASE 0xa400
+
+static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
+{
+ if (reg == 0x1f) {
+ tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
+ return;
}
+
+ if (tp->ocp_base != OCP_STD_PHY_BASE)
+ reg -= 0x10;
+
+ r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
+}
+
+static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
+{
+ if (tp->ocp_base != OCP_STD_PHY_BASE)
+ reg -= 0x10;
+
+ return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
+}
+
+DECLARE_RTL_COND(rtl_phyar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(PHYAR) & 0x80000000;
+}
+
+static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
+
+ rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
/*
* According to hardware specs a 20us delay is required after write
* complete indication, but before sending next command.
@@ -933,23 +1119,16 @@ static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
udelay(20);
}
-static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
- int i, value = -1;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int value;
- RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
+ RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
+
+ value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
+ RTL_R32(PHYAR) & 0xffff : ~0;
- for (i = 20; i > 0; i--) {
- /*
- * Check if the RTL8169 has completed retrieving data from
- * the specified MII register.
- */
- if (RTL_R32(PHYAR) & 0x80000000) {
- value = RTL_R32(PHYAR) & 0xffff;
- break;
- }
- udelay(25);
- }
/*
* According to hardware specs a 20us delay is required after read
* complete indication, but before sending next command.
@@ -959,45 +1138,35 @@ static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
+static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
- int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W32(OCPDR, data |
- ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
+ RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
RTL_W32(EPHY_RXER_NUM, 0);
- for (i = 0; i < 100; i++) {
- mdelay(1);
- if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
- break;
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
-static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
- r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
- (value & OCPDR_DATA_MASK));
+ r8168dp_1_mdio_access(tp, reg,
+ OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}
-static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
- int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
+ r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
mdelay(1);
RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
RTL_W32(EPHY_RXER_NUM, 0);
- for (i = 0; i < 100; i++) {
- mdelay(1);
- if (RTL_R32(OCPAR) & OCPAR_FLAG)
- break;
- }
-
- return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
+ RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
@@ -1012,22 +1181,25 @@ static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
-static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
r8168dp_2_mdio_start(ioaddr);
- r8169_mdio_write(ioaddr, reg_addr, value);
+ r8169_mdio_write(tp, reg, value);
r8168dp_2_mdio_stop(ioaddr);
}
-static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
int value;
r8168dp_2_mdio_start(ioaddr);
- value = r8169_mdio_read(ioaddr, reg_addr);
+ value = r8169_mdio_read(tp, reg);
r8168dp_2_mdio_stop(ioaddr);
@@ -1036,12 +1208,12 @@ static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
- tp->mdio_ops.write(tp->mmio_addr, location, val);
+ tp->mdio_ops.write(tp, location, val);
}
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
- return tp->mdio_ops.read(tp->mmio_addr, location);
+ return tp->mdio_ops.read(tp, location);
}
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
@@ -1072,79 +1244,64 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
return rtl_readphy(tp, location);
}
-static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
+DECLARE_RTL_COND(rtl_ephyar_cond)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(EPHYAR) & EPHYAR_FLAG;
+}
+
+static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
+
+ udelay(10);
}
-static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
+static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
- u16 value = 0xffff;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
- value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
+ RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
-static
-void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
BUG_ON((addr & 3) || (mask == 0));
RTL_W32(ERIDR, val);
RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
- break;
- udelay(100);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
-static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(ERIAR) & ERIAR_FLAG) {
- value = RTL_R32(ERIDR);
- break;
- }
- udelay(100);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ RTL_R32(ERIDR) : ~0;
}
-static void
-rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
+static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
+ u32 m, int type)
{
u32 val;
- val = rtl_eri_read(ioaddr, addr, type);
- rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
+ val = rtl_eri_read(tp, addr, type);
+ rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
}
struct exgmac_reg {
@@ -1153,31 +1310,30 @@ struct exgmac_reg {
u32 val;
};
-static void rtl_write_exgmac_batch(void __iomem *ioaddr,
+static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
const struct exgmac_reg *r, int len)
{
while (len-- > 0) {
- rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
+ rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
r++;
}
}
-static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
+DECLARE_RTL_COND(rtl_efusear_cond)
{
- u8 value = 0xff;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+ return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
+}
- for (i = 0; i < 300; i++) {
- if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
- value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
- break;
- }
- udelay(100);
- }
+static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
- return value;
+ RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
+ RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
static u16 rtl_get_events(struct rtl8169_private *tp)
@@ -1276,48 +1432,48 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
if (RTL_R8(PHYstatus) & _1000bpsF) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x00000011, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else if (RTL_R8(PHYstatus) & _100bps) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x0000003f, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
+ ERIAR_EXGMAC);
}
/* Reset packet filter */
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
ERIAR_EXGMAC);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
if (RTL_R8(PHYstatus) & _1000bpsF) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x00000011, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x0000003f, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
+ ERIAR_EXGMAC);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
if (RTL_R8(PHYstatus) & _10bps) {
- rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
- 0x4d02, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
- 0x0060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
- 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
+ ERIAR_EXGMAC);
}
}
}
@@ -1784,6 +1940,13 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
}
}
+DECLARE_RTL_COND(rtl_counters_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(CounterAddrLow) & CounterDump;
+}
+
static void rtl8169_update_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1792,7 +1955,6 @@ static void rtl8169_update_counters(struct net_device *dev)
struct rtl8169_counters *counters;
dma_addr_t paddr;
u32 cmd;
- int wait = 1000;
/*
* Some chips are unable to dump tally counters when the receiver
@@ -1810,13 +1972,8 @@ static void rtl8169_update_counters(struct net_device *dev)
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | CounterDump);
- while (wait--) {
- if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
- memcpy(&tp->counters, counters, sizeof(*counters));
- break;
- }
- udelay(10);
- }
+ if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
+ memcpy(&tp->counters, counters, sizeof(*counters));
RTL_W32(CounterAddrLow, 0);
RTL_W32(CounterAddrHigh, 0);
@@ -1894,6 +2051,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
u32 val;
int mac_version;
} mac_info[] = {
+ /* 8168G family. */
+ { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
+ { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
+
/* 8168F family. */
{ 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
{ 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
@@ -1933,6 +2094,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
{ 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
+ { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
+ { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
{ 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
{ 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
{ 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
@@ -2186,7 +2349,7 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index -= regno;
break;
case PHY_READ_EFUSE:
- predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+ predata = rtl8168d_efuse_read(tp, regno);
index++;
break;
case PHY_CLEAR_READCOUNT:
@@ -2626,7 +2789,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 }
};
- void __iomem *ioaddr = tp->mmio_addr;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
@@ -2638,7 +2800,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
- if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
@@ -2738,11 +2900,10 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 }
};
- void __iomem *ioaddr = tp->mmio_addr;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
- if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
@@ -3010,8 +3171,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting */
- rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3115,7 +3275,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct phy_reg phy_reg_init[] = {
/* Channel estimation fine tune */
{ 0x1f, 0x0003 },
@@ -3189,7 +3348,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* eee setting */
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3211,6 +3370,55 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const u16 mac_ocp_patch[] = {
+ 0xe008, 0xe01b, 0xe01d, 0xe01f,
+ 0xe021, 0xe023, 0xe025, 0xe027,
+ 0x49d2, 0xf10d, 0x766c, 0x49e2,
+ 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
+
+ 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
+ 0xc707, 0x8ee1, 0x9d6c, 0xc603,
+ 0xbe00, 0xb416, 0x0076, 0xe86c,
+ 0xc602, 0xbe00, 0x0000, 0xc602,
+
+ 0xbe00, 0x0000, 0xc602, 0xbe00,
+ 0x0000, 0xc602, 0xbe00, 0x0000,
+ 0xc602, 0xbe00, 0x0000, 0xc602,
+ 0xbe00, 0x0000, 0xc602, 0xbe00,
+
+ 0x0000, 0x0000, 0x0000, 0x0000
+ };
+ u32 i;
+
+ /* Patch code for GPHY reset */
+ for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
+ r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
+ r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
+ r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
+
+ rtl_apply_firmware(tp);
+
+ if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
+ else
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
+
+ if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
+ rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
+ else
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
+
+ rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
+ rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
+
+ r8168_phy_ocp_write(tp, 0xa436, 0x8012);
+ rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
+
+ rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
+}
+
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3256,8 +3464,6 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
/* Disable ALDPS before setting firmware */
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x18, 0x0310);
@@ -3266,13 +3472,35 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
/* EEE setting */
- rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0004 },
+ { 0x10, 0xc07f },
+ { 0x19, 0x7030 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
+
+ rtl_apply_firmware(tp);
+
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+}
+
static void rtl_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3369,6 +3597,15 @@ static void rtl_hw_phy_config(struct net_device *dev)
rtl8411_hw_phy_config(tp);
break;
+ case RTL_GIGA_MAC_VER_39:
+ rtl8106e_hw_phy_config(tp);
+ break;
+
+ case RTL_GIGA_MAC_VER_40:
+ rtl8168g_1_hw_phy_config(tp);
+ break;
+
+ case RTL_GIGA_MAC_VER_41:
default:
break;
}
@@ -3426,18 +3663,16 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
free_netdev(dev);
}
+DECLARE_RTL_COND(rtl_phy_reset_cond)
+{
+ return tp->phy_reset_pending(tp);
+}
+
static void rtl8169_phy_reset(struct net_device *dev,
struct rtl8169_private *tp)
{
- unsigned int i;
-
tp->phy_reset_enable(tp);
- for (i = 0; i < 100; i++) {
- if (!tp->phy_reset_pending(tp))
- return;
- msleep(1);
- }
- netif_err(tp, link, dev, "PHY reset failed\n");
+ rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
@@ -3512,7 +3747,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
low >> 16 },
};
- rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
}
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -3589,6 +3824,11 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
ops->write = r8168dp_2_mdio_write;
ops->read = r8168dp_2_mdio_read;
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ ops->write = r8168g_mdio_write;
+ ops->read = r8168g_mdio_read;
+ break;
default:
ops->write = r8169_mdio_write;
ops->read = r8169_mdio_read;
@@ -3608,6 +3848,9 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
RTL_W32(RxConfig, RTL_R32(RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -3761,7 +4004,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
tp->mac_version == RTL_GIGA_MAC_VER_33)
- rtl_ephy_write(ioaddr, 0x19, 0xff64);
+ rtl_ephy_write(tp, 0x19, 0xff64);
if (rtl_wol_pll_power_down(tp))
return;
@@ -3830,6 +4073,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_29:
case RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39:
ops->down = r810x_pll_power_down;
ops->up = r810x_pll_power_up;
break;
@@ -3855,6 +4099,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_35:
case RTL_GIGA_MAC_VER_36:
case RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@@ -3894,6 +4140,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_22:
case RTL_GIGA_MAC_VER_23:
case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_34:
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
default:
@@ -4050,6 +4297,8 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
* No action needed for jumbo frames with 8169.
* No jumbo for 810x at all.
*/
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
default:
ops->disable = NULL;
ops->enable = NULL;
@@ -4057,20 +4306,20 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
}
}
+DECLARE_RTL_COND(rtl_chipcmd_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(ChipCmd) & CmdReset;
+}
+
static void rtl_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
- /* Soft reset the chip. */
RTL_W8(ChipCmd, CmdReset);
- /* Check that the chip has finished the reset. */
- for (i = 0; i < 100; i++) {
- if ((RTL_R8(ChipCmd) & CmdReset) == 0)
- break;
- udelay(100);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -4124,6 +4373,20 @@ static void rtl_rx_close(struct rtl8169_private *tp)
RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
+DECLARE_RTL_COND(rtl_npq_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(TxPoll) & NPQ;
+}
+
+DECLARE_RTL_COND(rtl_txcfg_empty_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(TxConfig) & TXCFG_EMPTY;
+}
+
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -4136,16 +4399,16 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
tp->mac_version == RTL_GIGA_MAC_VER_28 ||
tp->mac_version == RTL_GIGA_MAC_VER_31) {
- while (RTL_R8(TxPoll) & NPQ)
- udelay(20);
+ rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36 ||
tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_40 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_41 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
- while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
- udelay(100);
+ rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
} else {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
udelay(100);
@@ -4351,15 +4614,12 @@ static void rtl_hw_start_8169(struct net_device *dev)
static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
{
if (tp->csi_ops.write)
- tp->csi_ops.write(tp->mmio_addr, addr, value);
+ tp->csi_ops.write(tp, addr, value);
}
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
- if (tp->csi_ops.read)
- return tp->csi_ops.read(tp->mmio_addr, addr);
- else
- return ~0;
+ return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
}
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
@@ -4380,73 +4640,56 @@ static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
rtl_csi_access_enable(tp, 0x27000000);
}
-static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
+DECLARE_RTL_COND(rtl_csiar_cond)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(CSIAR) & CSIAR_FLAG;
+}
+
+static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIDR, value);
RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
+static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(CSIAR) & CSIAR_FLAG) {
- value = RTL_R32(CSIDR);
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ RTL_R32(CSIDR) : ~0;
}
-static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
+static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIDR, value);
RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
CSIAR_FUNC_NIC);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
+static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(CSIAR) & CSIAR_FLAG) {
- value = RTL_R32(CSIDR);
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ RTL_R32(CSIDR) : ~0;
}
static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
@@ -4491,13 +4734,14 @@ struct ephy_info {
u16 bits;
};
-static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
+static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
+ int len)
{
u16 w;
while (len-- > 0) {
- w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
- rtl_ephy_write(ioaddr, e->offset, w);
+ w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
+ rtl_ephy_write(tp, e->offset, w);
e++;
}
}
@@ -4581,7 +4825,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168cp[] = {
{ 0x01, 0, 0x0001 },
{ 0x02, 0x0800, 0x1000 },
@@ -4592,7 +4835,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+ rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
__rtl_hw_start_8168cp(tp);
}
@@ -4643,14 +4886,13 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
- rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+ rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168c_2[] = {
{ 0x01, 0, 0x0001 },
{ 0x03, 0x0400, 0x0220 }
@@ -4658,7 +4900,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+ rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
__rtl_hw_start_8168cp(tp);
}
@@ -4726,8 +4968,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
const struct ephy_info *e = e_info_8168d_4 + i;
u16 w;
- w = rtl_ephy_read(ioaddr, e->offset);
- rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
+ w = rtl_ephy_read(tp, e->offset);
+ rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
}
rtl_enable_clock_request(pdev);
@@ -4755,7 +4997,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
+ rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
@@ -4781,19 +5023,18 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+ rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -4819,16 +5060,16 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -4853,10 +5094,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */
RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
@@ -4864,7 +5104,6 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168f_1[] = {
{ 0x06, 0x00c0, 0x0020 },
{ 0x0f, 0xffff, 0x5200 },
@@ -4874,10 +5113,39 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
+}
+
+static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+
+ rtl_csi_access_enable_1(tp);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
+ RTL_W8(MaxTxPacketSize, EarlySize);
+
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
static void rtl_hw_start_8168(struct net_device *dev)
@@ -4981,6 +5249,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
rtl_hw_start_8411(tp);
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_hw_start_8168g_1(tp);
+ break;
+
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
@@ -5035,7 +5308,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(Config1, cfg1 & ~LEDS0);
- rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+ rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
@@ -5055,7 +5328,7 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
rtl_hw_start_8102e_2(tp);
- rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
+ rtl_ephy_write(tp, 0x03, 0xc2f9);
}
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
@@ -5081,15 +5354,13 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+ rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
rtl_hw_start_8105e_1(tp);
- rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+ rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
static void rtl_hw_start_8402(struct rtl8169_private *tp)
@@ -5108,18 +5379,29 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
+ rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+}
+
+static void rtl_hw_start_8106(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* Force LAN exit from ASPM if Rx/Tx are not idle */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+ RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+ RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
static void rtl_hw_start_8101(struct net_device *dev)
@@ -5166,6 +5448,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
case RTL_GIGA_MAC_VER_37:
rtl_hw_start_8402(tp);
break;
+
+ case RTL_GIGA_MAC_VER_39:
+ rtl_hw_start_8106(tp);
+ break;
}
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -5889,11 +6175,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
if (status & LinkChg)
__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
- napi_disable(&tp->napi);
- rtl_irq_disable(tp);
-
- napi_enable(&tp->napi);
- napi_schedule(&tp->napi);
+ rtl_irq_enable_all(tp);
}
static void rtl_task(struct work_struct *work)
@@ -6438,6 +6720,67 @@ static unsigned rtl_try_msi(struct rtl8169_private *tp,
return msi;
}
+DECLARE_RTL_COND(rtl_link_list_ready_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(MCU) & LINK_LIST_RDY;
+}
+
+DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+}
+
+static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 data;
+
+ tp->ocp_base = OCP_STD_PHY_BASE;
+
+ RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
+ return;
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
+ return;
+
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
+ msleep(1);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+ data = r8168_mac_ocp_read(tp, 0xe8de);
+ data &= ~(1 << 14);
+ r8168_mac_ocp_write(tp, 0xe8de, data);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+
+ data = r8168_mac_ocp_read(tp, 0xe8de);
+ data |= (1 << 15);
+ r8168_mac_ocp_write(tp, 0xe8de, data);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+}
+
+static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_hw_init_8168g(tp);
+ break;
+
+ default:
+ break;
+ }
+}
+
static int __devinit
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -6547,6 +6890,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_irq_disable(tp);
+ rtl_hw_initialize(tp);
+
rtl_hw_reset(tp);
rtl_ack_events(tp, 0xffff);
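The r8169 hunks above repeatedly replace open-coded udelay()/msleep() polling loops with a shared helper (rtl_loop_wait) driven by named conditions generated through DECLARE_RTL_COND. The standalone C sketch below only illustrates that pattern and is not part of the patch: every identifier in it (demo_device, demo_cond, demo_loop_wait, DECLARE_DEMO_COND, demo_idle_cond) is invented for the example, and it is written as an ordinary userspace program so it can be compiled and run.

/* Minimal userspace sketch of the "named condition + shared poll loop"
 * pattern; assumptions: plain C, usleep() stands in for udelay()/msleep(). */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct demo_device { int busy_polls_left; };

struct demo_cond {
	bool (*check)(struct demo_device *);
	const char *msg;
};

/* Poll cond->check() up to n times, sleeping d microseconds between tries,
 * until it matches the expected level; log the condition name on timeout. */
static bool demo_loop_wait(struct demo_device *dev, const struct demo_cond *c,
			   unsigned int d, int n, bool high)
{
	int i;

	for (i = 0; i < n; i++) {
		usleep(d);
		if (c->check(dev) == high)
			return true;
	}
	fprintf(stderr, "timeout waiting for %s\n", c->msg);
	return false;
}

/* Same trick as DECLARE_RTL_COND: one macro emits the forward declaration,
 * the named condition object and the opening of the check function. */
#define DECLARE_DEMO_COND(name)					\
static bool name ## _check(struct demo_device *);		\
								\
static const struct demo_cond name = {				\
	.check	= name ## _check,				\
	.msg	= #name						\
};								\
								\
static bool name ## _check(struct demo_device *dev)

DECLARE_DEMO_COND(demo_idle_cond)
{
	return --dev->busy_polls_left <= 0;	/* "idle" after a few polls */
}

int main(void)
{
	struct demo_device dev = { .busy_polls_left = 3 };

	if (demo_loop_wait(&dev, &demo_idle_cond, 100, 20, true))
		printf("%s met\n", demo_idle_cond.msg);
	return 0;
}

The payoff mirrored in the patch is that each hardware wait collapses to a one-line call naming the condition, and every timeout is reported uniformly with that condition's name.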
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 667169b..af0b867 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -49,6 +49,34 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_ARCH_R8A7740)
+static void sh_eth_select_mii(struct net_device *ndev)
+{
+ u32 value = 0x0;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->phy_interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ value = 0x2;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ value = 0x1;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ value = 0x0;
+ break;
+ default:
+ pr_warn("PHY interface mode was not setup. Set to MII.\n");
+ value = 0x1;
+ break;
+ }
+
+ sh_eth_write(ndev, value, RMII_MII);
+}
+#endif
+
/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT 1
@@ -102,6 +130,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES 1
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
+
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -176,23 +206,19 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
}
static int sh_eth_is_gether(struct sh_eth_private *mdp);
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int cnt = 100;
+ int ret = 0;
if (sh_eth_is_gether(mdp)) {
sh_eth_write(ndev, 0x03, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt < 0)
- printk(KERN_ERR "Device reset fail\n");
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -210,6 +236,9 @@ static void sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
EDMR);
}
+
+out:
+ return ret;
}
static void sh_eth_set_duplex_giga(struct net_device *ndev)
@@ -282,7 +311,9 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);
+
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -292,35 +323,6 @@ static void sh_eth_chip_reset(struct net_device *ndev)
mdelay(1);
}
-static void sh_eth_reset(struct net_device *ndev)
-{
- int cnt = 100;
-
- sh_eth_write(ndev, EDSR_ENALL, EDSR);
- sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt == 0)
- printk(KERN_ERR "Device reset fail\n");
-
- /* Table Init */
- sh_eth_write(ndev, 0x0, TDLAR);
- sh_eth_write(ndev, 0x0, TDFAR);
- sh_eth_write(ndev, 0x0, TDFXR);
- sh_eth_write(ndev, 0x0, TDFFR);
- sh_eth_write(ndev, 0x0, RDLAR);
- sh_eth_write(ndev, 0x0, RDFAR);
- sh_eth_write(ndev, 0x0, RDFXR);
- sh_eth_write(ndev, 0x0, RDFFR);
-
- /* Reset HW CRC register */
- sh_eth_reset_hw_crc(ndev);
-}
-
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -377,9 +379,41 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.tsu = 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
.hw_crc = 1,
+ .select_mii = 1,
#endif
};
+static int sh_eth_reset(struct net_device *ndev)
+{
+ int ret = 0;
+
+ sh_eth_write(ndev, EDSR_ENALL, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
+
+ /* Table Init */
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
+
+ /* Reset HW CRC register */
+ sh_eth_reset_hw_crc(ndev);
+
+ /* Select MII mode */
+ if (sh_eth_my_cpu_data.select_mii)
+ sh_eth_select_mii(ndev);
+out:
+ return ret;
+}
+
static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
if (sh_eth_my_cpu_data.hw_crc)
@@ -388,44 +422,29 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
+
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long mii;
/* reset device */
sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
mdelay(1);
- switch (mdp->phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- mii = 2;
- break;
- case PHY_INTERFACE_MODE_MII:
- mii = 1;
- break;
- case PHY_INTERFACE_MODE_RMII:
- default:
- mii = 0;
- break;
- }
- sh_eth_write(ndev, mii, RMII_MII);
+ sh_eth_select_mii(ndev);
}
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
- int cnt = 100;
+ int ret = 0;
sh_eth_write(ndev, EDSR_ENALL, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt == 0)
- printk(KERN_ERR "Device reset fail\n");
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -436,6 +455,9 @@ static void sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, 0x0, RDFAR);
sh_eth_write(ndev, 0x0, RDFXR);
sh_eth_write(ndev, 0x0, RDFFR);
+
+out:
+ return ret;
}
static void sh_eth_set_duplex(struct net_device *ndev)
@@ -492,6 +514,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
+ .select_mii = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -543,11 +566,31 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
mdelay(3);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
+
+ return 0;
+}
+#else
+static int sh_eth_check_reset(struct net_device *ndev)
+{
+ int ret = 0;
+ int cnt = 100;
+
+ while (cnt > 0) {
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+ if (cnt < 0) {
+ printk(KERN_ERR "Device reset fail\n");
+ ret = -ETIMEDOUT;
+ }
+ return ret;
}
#endif
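The sh_eth hunks below replace the compile-time RX_RING_SIZE/TX_RING_SIZE constants with per-device num_rx_ring/num_tx_ring fields, indexed through free-running cur/dirty counters, which is what later enables the ethtool get/set_ringparam hooks. The following is only a userspace sketch of that ring-accounting scheme under invented names (demo_ring, demo_post, demo_reap); it has no relation to the driver's real descriptors.

/* Minimal sketch: runtime-sized ring with free-running cur/dirty indices,
 * mirroring the "entry = cur % num_ring" style the patch converts to. */
#include <stdio.h>
#include <stdlib.h>

struct demo_ring {
	unsigned int num;	/* runtime-configurable ring size */
	unsigned int cur;	/* next slot to fill (free-running) */
	unsigned int dirty;	/* oldest slot not yet reclaimed   */
	int *slot;
};

static int demo_ring_init(struct demo_ring *r, unsigned int num)
{
	r->slot = calloc(num, sizeof(*r->slot));
	if (!r->slot)
		return -1;
	r->num = num;
	r->cur = r->dirty = 0;
	return 0;
}

static int demo_post(struct demo_ring *r, int value)
{
	if (r->cur - r->dirty >= r->num)	/* ring full */
		return -1;
	r->slot[r->cur % r->num] = value;	/* modulo by runtime size */
	r->cur++;
	return 0;
}

static void demo_reap(struct demo_ring *r)
{
	for (; r->cur - r->dirty > 0; r->dirty++)
		printf("reaped slot %u: %d\n",
		       r->dirty % r->num, r->slot[r->dirty % r->num]);
}

int main(void)
{
	struct demo_ring r;
	int i;

	if (demo_ring_init(&r, 4))	/* think "ethtool -G <if> tx 4" */
		return 1;

	for (i = 0; i < 4; i++)
		demo_post(&r, i * 10);
	printf("ring full: %s\n", demo_post(&r, 99) ? "yes" : "no");

	demo_reap(&r);			/* frees all four slots */
	free(r.slot);
	return 0;
}

Because cur and dirty only ever grow, "cur - dirty" gives the number of in-flight slots even across unsigned wrap-around, and only the modulo by the runtime-configured size ever touches the array.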
@@ -739,21 +782,23 @@ static void sh_eth_ring_free(struct net_device *ndev)
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
if (mdp->rx_skbuff[i])
dev_kfree_skb(mdp->rx_skbuff[i]);
}
}
kfree(mdp->rx_skbuff);
+ mdp->rx_skbuff = NULL;
/* Free Tx skb ringbuffer */
if (mdp->tx_skbuff) {
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
}
}
kfree(mdp->tx_skbuff);
+ mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
@@ -764,8 +809,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
struct sk_buff *skb;
struct sh_eth_rxdesc *rxdesc = NULL;
struct sh_eth_txdesc *txdesc = NULL;
- int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
- int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
+ int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
+ int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
mdp->cur_rx = mdp->cur_tx = 0;
mdp->dirty_rx = mdp->dirty_tx = 0;
@@ -773,7 +818,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
memset(mdp->rx_ring, 0, rx_ringsize);
/* build Rx ring buffer */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
/* skb */
mdp->rx_skbuff[i] = NULL;
skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -799,7 +844,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
}
}
- mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
+ mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
@@ -807,7 +852,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
memset(mdp->tx_ring, 0, tx_ringsize);
/* build Tx ring buffer */
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -841,7 +886,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
+ mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
GFP_KERNEL);
if (!mdp->rx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
@@ -849,7 +894,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
return ret;
}
- mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
+ mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
GFP_KERNEL);
if (!mdp->tx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -858,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
}
/* Allocate all Rx descriptors. */
- rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+ rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
GFP_KERNEL);
@@ -872,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->dirty_rx = 0;
/* Allocate all Tx descriptors. */
- tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+ tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
GFP_KERNEL);
if (!mdp->tx_ring) {
@@ -890,19 +935,41 @@ desc_ring_free:
skb_ring_free:
/* Free Rx and Tx skb ring buffer */
sh_eth_ring_free(ndev);
+ mdp->tx_ring = NULL;
+ mdp->rx_ring = NULL;
return ret;
}
-static int sh_eth_dev_init(struct net_device *ndev)
+static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
+{
+ int ringsize;
+
+ if (mdp->rx_ring) {
+ ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+ mdp->rx_desc_dma);
+ mdp->rx_ring = NULL;
+ }
+
+ if (mdp->tx_ring) {
+ ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+ mdp->tx_desc_dma);
+ mdp->tx_ring = NULL;
+ }
+}
+
+static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
int ret = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
- u_int32_t rx_int_var, tx_int_var;
u32 val;
/* Soft Reset */
- sh_eth_reset(ndev);
+ ret = sh_eth_reset(ndev);
+ if (ret)
+ goto out;
/* Descriptor format */
sh_eth_ring_format(ndev);
@@ -926,9 +993,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Frame recv control */
sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
- rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
- tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
- sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
+ sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
@@ -943,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ if (start)
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* PAUSE Prohibition */
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -958,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
/* E-MAC Interrupt Enable register */
- sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+ if (start)
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
@@ -971,11 +1038,14 @@ static int sh_eth_dev_init(struct net_device *ndev)
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
+ if (start) {
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
- netif_start_queue(ndev);
+ netif_start_queue(ndev);
+ }
+out:
return ret;
}
@@ -988,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
int entry = 0;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
- entry = mdp->dirty_tx % TX_RING_SIZE;
+ entry = mdp->dirty_tx % mdp->num_tx_ring;
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
@@ -1001,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
freeNum++;
}
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
- if (entry >= TX_RING_SIZE - 1)
+ if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
ndev->stats.tx_packets++;
@@ -1011,13 +1081,13 @@ static int sh_eth_txfree(struct net_device *ndev)
}
/* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
- int entry = mdp->cur_rx % RX_RING_SIZE;
- int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
+ int entry = mdp->cur_rx % mdp->num_rx_ring;
+ int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
@@ -1068,13 +1138,13 @@ static int sh_eth_rx(struct net_device *ndev)
ndev->stats.rx_bytes += pkt_len;
}
rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
- entry = (++mdp->cur_rx) % RX_RING_SIZE;
+ entry = (++mdp->cur_rx) % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
}
/* Refill the Rx ring buffers. */
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
- entry = mdp->dirty_rx % RX_RING_SIZE;
+ entry = mdp->dirty_rx % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
/* The buffer size is aligned to a 16-byte boundary. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1091,7 +1161,7 @@ static int sh_eth_rx(struct net_device *ndev)
skb_checksum_none_assert(skb);
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
}
- if (entry >= RX_RING_SIZE - 1)
+ if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
else
@@ -1102,9 +1172,11 @@ static int sh_eth_rx(struct net_device *ndev)
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
- /* fix the values for the next receiving */
- mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
- sh_eth_read(ndev, RDLAR)) >> 4;
+ /* fix the values for the next receiving if RDE is set */
+ if (intr_status & EESR_RDE)
+ mdp->cur_rx = mdp->dirty_rx =
+ (sh_eth_read(ndev, RDFAR) -
+ sh_eth_read(ndev, RDLAR)) >> 4;
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
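The shifted difference above recovers a descriptor index because each sh_eth RX descriptor occupies 16 bytes, hence the >> 4. A minimal stand-alone sketch of that arithmetic, with illustrative register values (not taken from real hardware):

	/* Sketch: recover the next RX ring index from RDFAR/RDLAR as done in
	 * sh_eth_rx() above, assuming the 16-byte descriptor stride. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int rdlar = 0x1000;	/* descriptor list base (illustrative) */
		unsigned int rdfar = 0x1040;	/* current fetch address (illustrative) */

		printf("next RX index = %u\n", (rdfar - rdlar) >> 4);	/* prints 4 */
		return 0;
	}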
@@ -1273,7 +1345,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
EESR_RTSF | /* short frame recv */
EESR_PRE | /* PHY-LSI recv error */
EESR_CERF)){ /* recv frame CRC error */
- sh_eth_rx(ndev);
+ sh_eth_rx(ndev, intr_status);
}
/* Tx Check */
@@ -1291,14 +1363,6 @@ other_irq:
return ret;
}
-static void sh_eth_timer(unsigned long data)
-{
- struct net_device *ndev = (struct net_device *)data;
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- mod_timer(&mdp->timer, jiffies + (10 * HZ));
-}
-
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
@@ -1497,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
}
+static void sh_eth_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ ring->rx_max_pending = RX_RING_MAX;
+ ring->tx_max_pending = TX_RING_MAX;
+ ring->rx_pending = mdp->num_rx_ring;
+ ring->tx_pending = mdp->num_tx_ring;
+}
+
+static int sh_eth_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
+
+ if (ring->tx_pending > TX_RING_MAX ||
+ ring->rx_pending > RX_RING_MAX ||
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_pending < RX_RING_MIN)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(ndev)) {
+ netif_tx_disable(ndev);
+ /* Disable interrupts by clearing the interrupt mask. */
+ sh_eth_write(ndev, 0x0000, EESIPR);
+ /* Stop the chip's Tx and Rx processes. */
+ sh_eth_write(ndev, 0, EDTRR);
+ sh_eth_write(ndev, 0, EDRRR);
+ synchronize_irq(ndev->irq);
+ }
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+ /* Free DMA buffer */
+ sh_eth_free_dma_buffer(mdp);
+
+ /* Set new parameters */
+ mdp->num_rx_ring = ring->rx_pending;
+ mdp->num_tx_ring = ring->tx_pending;
+
+ ret = sh_eth_ring_init(ndev);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+ return ret;
+ }
+ ret = sh_eth_dev_init(ndev, false);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+ netif_wake_queue(ndev);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_settings = sh_eth_get_settings,
.set_settings = sh_eth_set_settings,
@@ -1507,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_strings = sh_eth_get_strings,
.get_ethtool_stats = sh_eth_get_ethtool_stats,
.get_sset_count = sh_eth_get_sset_count,
+ .get_ringparam = sh_eth_get_ringparam,
+ .set_ringparam = sh_eth_set_ringparam,
};
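The new get/set_ringparam hooks above are reachable through the standard ethtool ring interface. Below is a minimal user-space sketch, assuming an interface named "eth0" and sizes inside the TX/RX_RING_MIN..MAX bounds defined later in sh_eth.h; it is roughly equivalent to `ethtool -G eth0 rx 128 tx 128` and is for illustration only.

	/* User-space sketch: query and resize the rings via SIOCETHTOOL.
	 * The interface name and the sizes here are illustrative only. */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&ring;

		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("rx %u/%u tx %u/%u\n", ring.rx_pending,
			       ring.rx_max_pending, ring.tx_pending,
			       ring.tx_max_pending);

		ring.cmd = ETHTOOL_SRINGPARAM;	/* resize within the driver's bounds */
		ring.rx_pending = 128;
		ring.tx_pending = 128;
		ioctl(fd, SIOCETHTOOL, &ifr);

		close(fd);
		return 0;
	}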
/* network device open function */
@@ -1537,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
goto out_free_irq;
/* device init */
- ret = sh_eth_dev_init(ndev);
+ ret = sh_eth_dev_init(ndev, true);
if (ret)
goto out_free_irq;
@@ -1546,11 +1677,6 @@ static int sh_eth_open(struct net_device *ndev)
if (ret)
goto out_free_irq;
- /* Set the timer to check for link beat. */
- init_timer(&mdp->timer);
- mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
- setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
-
return ret;
out_free_irq:
@@ -1575,11 +1701,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* tx_errors count up */
ndev->stats.tx_errors++;
- /* timer off */
- del_timer_sync(&mdp->timer);
-
/* Free all the skbuffs in the Rx queue. */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
rxdesc = &mdp->rx_ring[i];
rxdesc->status = 0;
rxdesc->addr = 0xBADF00D0;
@@ -1587,18 +1710,14 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
dev_kfree_skb(mdp->rx_skbuff[i]);
mdp->rx_skbuff[i] = NULL;
}
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
mdp->tx_skbuff[i] = NULL;
}
/* device init */
- sh_eth_dev_init(ndev);
-
- /* timer on */
- mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
- add_timer(&mdp->timer);
+ sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
@@ -1610,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
- if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
+ if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
if (!sh_eth_txfree(ndev)) {
if (netif_msg_tx_queued(mdp))
dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1621,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
- entry = mdp->cur_tx % TX_RING_SIZE;
+ entry = mdp->cur_tx % mdp->num_tx_ring;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
/* soft swap. */
@@ -1635,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
else
txdesc->buffer_length = skb->len;
- if (entry >= TX_RING_SIZE - 1)
+ if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -1652,7 +1771,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static int sh_eth_close(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ringsize;
netif_stop_queue(ndev);
@@ -1671,18 +1789,11 @@ static int sh_eth_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
- del_timer_sync(&mdp->timer);
-
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
/* free DMA buffer */
- ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
- dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
-
- /* free DMA buffer */
- ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
- dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
+ sh_eth_free_dma_buffer(mdp);
pm_runtime_put_sync(&mdp->pdev->dev);
@@ -2273,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ether_setup(ndev);
mdp = netdev_priv(ndev);
+ mdp->num_tx_ring = TX_RING_SIZE;
+ mdp->num_rx_ring = RX_RING_SIZE;
mdp->addr = ioremap(res->start, resource_size(res));
if (mdp->addr == NULL) {
ret = -ENOMEM;
@@ -2310,8 +2423,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* debug message level */
mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
- mdp->post_rx = POST_RX >> (devno << 1);
- mdp->post_fw = POST_FW >> (devno << 1);
/* read and set MAC address */
read_mac_address(ndev, pd->mac_addr);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57b8e1f..bae84fd 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,6 +27,10 @@
#define TX_TIMEOUT (5*HZ)
#define TX_RING_SIZE 64 /* Tx ring size */
#define RX_RING_SIZE 64 /* Rx ring size */
+#define TX_RING_MIN 64
+#define RX_RING_MIN 64
+#define TX_RING_MAX 1024
+#define RX_RING_MAX 1024
#define ETHERSMALL 60
#define PKT_BUF_SZ 1538
#define SH_ETH_TSU_TIMEOUT_MS 500
@@ -585,71 +589,6 @@ enum RPADIR_BIT {
/* FDR */
#define DEFAULT_FDR_INIT 0x00000707
-enum phy_offsets {
- PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
- PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
- PHY_16 = 16,
-};
-
-/* PHY_CTRL */
-enum PHY_CTRL_BIT {
- PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
- PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
- PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
-};
-#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
-
-/* PHY_STAT */
-enum PHY_STAT_BIT {
- PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
- PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
- PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
- PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
-};
-
-/* PHY_ANA */
-enum PHY_ANA_BIT {
- PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
- PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
- PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
- PHY_A_SEL = 0x001e,
-};
-/* PHY_ANL */
-enum PHY_ANL_BIT {
- PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
- PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
- PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
- PHY_L_SEL = 0x001f,
-};
-
-/* PHY_ANE */
-enum PHY_ANE_BIT {
- PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
- PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
-};
-
-/* DM9161 */
-enum PHY_16_BIT {
- PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
- PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
- PHY_16_TXselect = 0x0400,
- PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
- PHY_16_Force100LNK = 0x0080,
- PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
- PHY_16_RPDCTR_EN = 0x0010,
- PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
- PHY_16_Sleepmode = 0x0002,
- PHY_16_RemoteLoopOut = 0x0001,
-};
-
-#define POST_RX 0x08
-#define POST_FW 0x04
-#define POST0_RX (POST_RX)
-#define POST0_FW (POST_FW)
-#define POST1_RX (POST_RX >> 2)
-#define POST1_FW (POST_FW >> 2)
-#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
-
/* ARSTR */
enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
@@ -757,6 +696,7 @@ struct sh_eth_cpu_data {
unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
unsigned hw_crc:1; /* E-DMAC have CSMR */
+ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
};
struct sh_eth_private {
@@ -765,13 +705,14 @@ struct sh_eth_private {
const u16 *reg_offset;
void __iomem *addr;
void __iomem *tsu_addr;
+ u32 num_rx_ring;
+ u32 num_tx_ring;
dma_addr_t rx_desc_dma;
dma_addr_t tx_desc_dma;
struct sh_eth_rxdesc *rx_ring;
struct sh_eth_txdesc *tx_ring;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
- struct timer_list timer;
spinlock_t lock;
u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
@@ -786,10 +727,6 @@ struct sh_eth_private {
int msg_enable;
int speed;
int duplex;
- u32 rx_int_var, tx_int_var; /* interrupt control variables */
- char post_rx; /* POST receive */
- char post_fw; /* POST forward */
- struct net_device_stats tsu_stats; /* TSU forward status */
int port; /* for TSU */
int vlan_num_ids; /* for VLAN tag filter */
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 0310b9f..db4beed 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -48,8 +48,7 @@
/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
-/**
- * MCDI version 1
+/* MCDI version 1
*
* Each MCDI request starts with an MCDI_HEADER, which is a 32byte
* structure, filled in by the client.
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0e57535..a1965c0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -527,7 +527,7 @@ struct efx_phy_operations {
};
/**
- * @enum efx_phy_mode - PHY operating mode flags
+ * enum efx_phy_mode - PHY operating mode flags
* @PHY_MODE_NORMAL: on and should pass traffic
* @PHY_MODE_TX_DISABLED: on with TX disabled
* @PHY_MODE_LOW_POWER: set to low power through MDIO
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 243e91f..fca61fe 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -336,6 +336,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
/**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
+ *
* This will aim to fill the RX descriptor queue up to
* @rx_queue->@max_fill. If there is insufficient atomic
* memory to do so, a slow fill will be scheduled.
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ac149d9..b5ba308 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -583,7 +583,7 @@ static inline void ioc3_rx(struct net_device *dev)
unsigned long *rxr;
u32 w0, err;
- rxr = (unsigned long *) ip->rxr; /* Ring base */
+ rxr = ip->rxr; /* Ring base */
rx_entry = ip->rx_ci; /* RX consume index */
n_entry = ip->rx_pi;
@@ -903,7 +903,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
if (ip->rxr == NULL) {
/* Allocate and initialize rx ring. 4kb = 512 entries */
ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
- rxr = (unsigned long *) ip->rxr;
+ rxr = ip->rxr;
if (!rxr)
printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8814b2f..8d15f7a 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -773,7 +773,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
return 1;
}
-/*
+/**
* smc911x_phy_reset - reset the phy
* @dev: net device
* @phy: phy address
@@ -819,7 +819,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
return reg & PMT_CTRL_PHY_RST_;
}
-/*
+/**
* smc911x_phy_powerdown - powerdown phy
* @dev: net device
* @phy: phy address
@@ -837,7 +837,7 @@ static void smc911x_phy_powerdown(struct net_device *dev, int phy)
SMC_SET_PHY_BMCR(lp, phy, bmcr);
}
-/*
+/**
* smc911x_phy_check_media - check the media status and adjust BMCR
* @dev: net device
* @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index fee4493..318adc9 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -942,7 +942,7 @@ static int smc_phy_fixed(struct net_device *dev)
return 1;
}
-/*
+/**
* smc_phy_reset - reset the phy
* @dev: net device
* @phy: phy address
@@ -976,7 +976,7 @@ static int smc_phy_reset(struct net_device *dev, int phy)
return bmcr & BMCR_RESET;
}
-/*
+/**
* smc_phy_powerdown - powerdown phy
* @dev: net device
*
@@ -1000,7 +1000,7 @@ static void smc_phy_powerdown(struct net_device *dev)
smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
}
-/*
+/**
* smc_phy_check_media - check the media status and adjust TCR
* @dev: net device
* @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 1466e5d..54ca99d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1442,6 +1442,14 @@ smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
smsc911x_mac_write(pdata, ADDRL, mac_low32);
}
+static void smsc911x_disable_irq_chip(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ smsc911x_reg_write(pdata, INT_EN, 0);
+ smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+}
+
static int smsc911x_open(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
@@ -1494,8 +1502,7 @@ static int smsc911x_open(struct net_device *dev)
spin_unlock_irq(&pdata->mac_lock);
/* Initialise irqs, but leave all sources disabled */
- smsc911x_reg_write(pdata, INT_EN, 0);
- smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+ smsc911x_disable_irq_chip(dev);
/* Set interrupt deassertion to 100uS */
intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
@@ -2215,9 +2222,6 @@ static int __devinit smsc911x_init(struct net_device *dev)
if (smsc911x_soft_reset(pdata))
return -ENODEV;
- /* Disable all interrupt sources until we bring the device up */
- smsc911x_reg_write(pdata, INT_EN, 0);
-
ether_setup(dev);
dev->flags |= IFF_MULTICAST;
netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
@@ -2434,8 +2438,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
smsc911x_reg_write(pdata, INT_CFG, intcfg);
/* Ensure interrupts are globally disabled before connecting ISR */
- smsc911x_reg_write(pdata, INT_EN, 0);
- smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+ smsc911x_disable_irq_chip(dev);
retval = request_irq(dev->irq, smsc911x_irqhandler,
irq_flags | IRQF_SHARED, dev->name, dev);
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index fd33b21..1fcd914e 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1640,8 +1640,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_free_io_4;
/* descriptors are aligned due to the nature of pci_alloc_consistent */
- pd->tx_ring = (struct smsc9420_dma_desc *)
- (pd->rx_ring + RX_RING_SIZE);
+ pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
pd->tx_dma_addr = pd->rx_dma_addr +
sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 0364283..9f44827 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -13,7 +13,7 @@ config STMMAC_ETH
if STMMAC_ETH
config STMMAC_PLATFORM
- tristate "STMMAC platform bus support"
+ bool "STMMAC Platform bus support"
depends on STMMAC_ETH
default y
---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
If unsure, say N.
config STMMAC_PCI
- tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+ bool "STMMAC PCI bus support (EXPERIMENTAL)"
depends on STMMAC_ETH && PCI && EXPERIMENTAL
---help---
This is to select the Synopsys DWMAC available on PCI devices,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index bcd54d6..e2d0832 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -95,6 +95,16 @@ struct stmmac_extra_stats {
unsigned long poll_n;
unsigned long sched_timer_n;
unsigned long normal_irq_n;
+ unsigned long mmc_tx_irq_n;
+ unsigned long mmc_rx_irq_n;
+ unsigned long mmc_rx_csum_offload_irq_n;
+ /* EEE */
+ unsigned long irq_receive_pmt_irq_n;
+ unsigned long irq_tx_path_in_lpi_mode_n;
+ unsigned long irq_tx_path_exit_lpi_mode_n;
+ unsigned long irq_rx_path_in_lpi_mode_n;
+ unsigned long irq_rx_path_exit_lpi_mode_n;
+ unsigned long phy_eee_wakeup_error_n;
};
/* CSR Frequency Access Defines*/
@@ -162,6 +172,17 @@ enum tx_dma_irq_status {
handle_tx_rx = 3,
};
+enum core_specific_irq_mask {
+ core_mmc_tx_irq = 1,
+ core_mmc_rx_irq = 2,
+ core_mmc_rx_csum_offload_irq = 4,
+ core_irq_receive_pmt_irq = 8,
+ core_irq_tx_path_in_lpi_mode = 16,
+ core_irq_tx_path_exit_lpi_mode = 32,
+ core_irq_rx_path_in_lpi_mode = 64,
+ core_irq_rx_path_exit_lpi_mode = 128,
+};
+
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
@@ -208,6 +229,10 @@ struct dma_features {
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+/* Default LPI timers */
+#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
+#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
+
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
@@ -278,7 +303,7 @@ struct stmmac_ops {
/* Dump MAC registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (void __iomem *ioaddr);
+ int (*host_irq_status) (void __iomem *ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev, int id);
/* Flow control setting */
@@ -291,6 +316,10 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
+ void (*set_eee_mode) (void __iomem *ioaddr);
+ void (*reset_eee_mode) (void __iomem *ioaddr);
+ void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
+ void (*set_eee_pls) (void __iomem *ioaddr, int link);
};
struct mac_link {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 23478bf..f90fcb5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -36,6 +36,7 @@
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
enum dwmac1000_irq_status {
+ lpiis_irq = 0x400,
time_stamp_irq = 0x0200,
mmc_rx_csum_offload_irq = 0x0080,
mmc_tx_irq = 0x0040,
@@ -60,6 +61,25 @@ enum power_event {
power_down = 0x00000001,
};
+/* Energy Efficient Ethernet (EEE)
+ *
+ * LPI status, timer and control register offset
+ */
+#define LPI_CTRL_STATUS 0x0030
+#define LPI_TIMER_CTRL 0x0034
+
+/* LPI control and status defines */
+#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
+#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
+#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
+
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
(reg * 8))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b5e4d02..bfe0226 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -194,26 +194,107 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
}
-static void dwmac1000_irq_status(void __iomem *ioaddr)
+static int dwmac1000_irq_status(void __iomem *ioaddr)
{
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ int status = 0;
/* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ if ((intr_status & mmc_tx_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_TX_INTR));
- if (unlikely(intr_status & mmc_rx_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ status |= core_mmc_tx_irq;
+ }
+ if (unlikely(intr_status & mmc_rx_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_INTR));
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ status |= core_mmc_rx_irq;
+ }
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ status |= core_mmc_rx_csum_offload_irq;
+ }
if (unlikely(intr_status & pmt_irq)) {
- CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT
* status register. */
readl(ioaddr + GMAC_PMT);
+ status |= core_irq_receive_pmt_irq;
}
+ /* MAC tx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & lpiis_irq) {
+ /* Clear the LPI interrupt by reading Reg 12 */
+ u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEN) {
+ CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
+ status |= core_irq_tx_path_in_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEX) {
+ CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
+ status |= core_irq_tx_path_exit_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEN) {
+ CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
+ status |= core_irq_rx_path_in_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEX) {
+ CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
+ status |= core_irq_rx_path_exit_lpi_mode;
+ }
+ }
+
+ return status;
+}
+
+static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
+{
+ u32 value;
+
+ /* Enable the link status receive on the RGMII, SGMII or SMII
+ * receive path and instruct the transmitter to enter the LPI
+ * state. */
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
+{
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
+{
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (link)
+ value |= LPI_CTRL_STATUS_PLS;
+ else
+ value &= ~LPI_CTRL_STATUS_PLS;
+
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
+{
+ int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
+
+ /* Program the timers in the LPI timer control register:
+ * LS: minimum time (ms) for which the link
+ * status from PHY should be ok before transmitting
+ * the LPI pattern.
+ * TW: minimum time (us) for which the core waits
+ * after it has stopped transmitting the LPI pattern.
+ */
+ writel(value, ioaddr + LPI_TIMER_CTRL);
}
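For reference, the packing performed by dwmac1000_set_eee_timer() above places TW in the low 16 bits and LS in bits 16..26. With the default timers defined in common.h (LS = 0x3E8 ms, TW = 0), the register value works out as in this small stand-alone sketch (illustration only, not driver code):

	/* Sketch: reproduce the LPI_TIMER_CTRL packing for the defaults
	 * STMMAC_DEFAULT_LIT_LS_TIMER (0x3E8) and STMMAC_DEFAULT_TWT_LS_TIMER (0). */
	#include <stdio.h>

	int main(void)
	{
		int ls = 0x3E8, tw = 0x0;
		unsigned int value = (tw & 0xffff) | ((ls & 0x7ff) << 16);

		printf("LPI_TIMER_CTRL = 0x%08x\n", value);	/* 0x03e80000 */
		return 0;
	}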
static const struct stmmac_ops dwmac1000_ops = {
@@ -226,6 +307,10 @@ static const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
+ .set_eee_mode = dwmac1000_set_eee_mode,
+ .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_eee_timer = dwmac1000_set_eee_timer,
+ .set_eee_pls = dwmac1000_set_eee_pls,
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 19e0f4e..f83210e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -72,9 +72,9 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
return 0;
}
-static void dwmac100_irq_status(void __iomem *ioaddr)
+static int dwmac100_irq_status(void __iomem *ioaddr)
{
- return;
+ return 0;
}
static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 6e0360f..e678ce3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -70,6 +70,7 @@
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
/* DMA Status register defines */
+#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index fb8377d..4b785e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
csum);
-
+ wmb();
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+ wmb();
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6b5d060..ab4c376 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,6 +26,7 @@
#include <linux/clk.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
+#include <linux/pci.h>
#include "common.h"
#ifdef CONFIG_STMMAC_TIMER
#include "stmmac_timer.h"
@@ -86,6 +87,12 @@ struct stmmac_priv {
#endif
int clk_csr;
int synopsys_id;
+ struct timer_list eee_ctrl_timer;
+ bool tx_path_in_lpi_mode;
+ int lpi_irq;
+ int eee_enabled;
+ int eee_active;
+ int tx_lpi_timer;
};
extern int phyaddr;
@@ -95,7 +102,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
-
int stmmac_freeze(struct net_device *ndev);
int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
@@ -104,12 +110,14 @@ int stmmac_dvr_remove(struct net_device *ndev);
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
void __iomem *addr);
+void stmmac_disable_eee_mode(struct stmmac_priv *priv);
+bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_HAVE_CLK
static inline int stmmac_clk_enable(struct stmmac_priv *priv)
{
if (!IS_ERR(priv->stmmac_clk))
- return clk_enable(priv->stmmac_clk);
+ return clk_prepare_enable(priv->stmmac_clk);
return 0;
}
@@ -119,7 +127,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
if (IS_ERR(priv->stmmac_clk))
return;
- clk_disable(priv->stmmac_clk);
+ clk_disable_unprepare(priv->stmmac_clk);
}
static inline int stmmac_clk_get(struct stmmac_priv *priv)
{
@@ -143,3 +151,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
return 0;
}
#endif /* CONFIG_HAVE_CLK */
+
+
+#ifdef CONFIG_STMMAC_PLATFORM
+extern struct platform_driver stmmac_pltfr_driver;
+static inline int stmmac_register_platform(void)
+{
+ int err;
+
+ err = platform_driver_register(&stmmac_pltfr_driver);
+ if (err)
+ pr_err("stmmac: failed to register the platform driver\n");
+
+ return err;
+}
+static inline void stmmac_unregister_platform(void)
+{
+ platform_driver_unregister(&stmmac_pltfr_driver);
+}
+#else
+static inline int stmmac_register_platform(void)
+{
+ pr_debug("stmmac: do not register the platf driver\n");
+
+ return -EINVAL;
+}
+static inline void stmmac_unregister_platform(void)
+{
+}
+#endif /* CONFIG_STMMAC_PLATFORM */
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+static inline int stmmac_register_pci(void)
+{
+ int err;
+
+ err = pci_register_driver(&stmmac_pci_driver);
+ if (err)
+ pr_err("stmmac: failed to register the PCI driver\n");
+
+ return err;
+}
+static inline void stmmac_unregister_pci(void)
+{
+ pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+static inline int stmmac_register_pci(void)
+{
+ pr_debug("stmmac: do not register the PCI driver\n");
+
+ return -EINVAL;
+}
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ce43184..76fd61a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -93,6 +93,16 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(poll_n),
STMMAC_STAT(sched_timer_n),
STMMAC_STAT(normal_irq_n),
+ STMMAC_STAT(mmc_tx_irq_n),
+ STMMAC_STAT(mmc_rx_irq_n),
+ STMMAC_STAT(mmc_rx_csum_offload_irq_n),
+ STMMAC_STAT(irq_receive_pmt_irq_n),
+ STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
+ STMMAC_STAT(phy_eee_wakeup_error_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -366,6 +376,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
+ if (priv->eee_enabled) {
+ int val = phy_get_eee_err(priv->phydev);
+ if (val)
+ priv->xstats.phy_eee_wakeup_error_n = val;
+ }
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -464,6 +479,46 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
+static int stmmac_ethtool_op_get_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->dma_cap.eee)
+ return -EOPNOTSUPP;
+
+ edata->eee_enabled = priv->eee_enabled;
+ edata->eee_active = priv->eee_active;
+ edata->tx_lpi_timer = priv->tx_lpi_timer;
+
+ return phy_ethtool_get_eee(priv->phydev, edata);
+}
+
+static int stmmac_ethtool_op_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ priv->eee_enabled = edata->eee_enabled;
+
+ if (!priv->eee_enabled)
+ stmmac_disable_eee_mode(priv);
+ else {
+ /* We are asking to enable EEE, but it is safe to verify it
+ * all again by invoking the eee_init function; in case of
+ * failure it will return an error.
+ */
+ priv->eee_enabled = stmmac_eee_init(priv);
+ if (!priv->eee_enabled)
+ return -EOPNOTSUPP;
+
+ /* Do not change tx_lpi_timer in case of failure */
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+ return phy_ethtool_set_eee(priv->phydev, edata);
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -480,6 +535,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_strings = stmmac_get_strings,
.get_wol = stmmac_get_wol,
.set_wol = stmmac_set_wol,
+ .get_eee = stmmac_ethtool_op_get_eee,
+ .set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7096633..f6b04c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -133,6 +133,12 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+#define STMMAC_DEFAULT_LPI_TIMER 1000
+static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
+#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -161,6 +167,8 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
+ if (eee_timer < 0)
+ eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
@@ -229,6 +237,85 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
phydev->speed);
}
+static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+{
+ /* Check and enter LPI mode */
+ if ((priv->dirty_tx == priv->cur_tx) &&
+ (priv->tx_path_in_lpi_mode == false))
+ priv->hw->mac->set_eee_mode(priv->ioaddr);
+}
+
+void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+{
+ /* Exit and disable EEE in case we are in the LPI state. */
+ priv->hw->mac->reset_eee_mode(priv->ioaddr);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * stmmac_eee_ctrl_timer
+ * @arg : data hook
+ * Description:
+ * If there is no data transfer and we are not already in the LPI state,
+ * then the MAC transmitter can be moved to the LPI state.
+ */
+static void stmmac_eee_ctrl_timer(unsigned long arg)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+}
+
+/**
+ * stmmac_eee_init
+ * @priv: private device pointer
+ * Description:
+ * If EEE support was enabled while configuring the driver, the GMAC
+ * actually supports it (from the HW cap reg) and the PHY can also
+ * manage EEE, then enable the LPI state and start the timer to verify
+ * whether the tx path can enter the LPI state.
+ */
+bool stmmac_eee_init(struct stmmac_priv *priv)
+{
+ bool ret = false;
+
+ /* MAC core supports the EEE feature. */
+ if (priv->dma_cap.eee) {
+ /* Check if the PHY supports EEE */
+ if (phy_init_eee(priv->phydev, 1))
+ goto out;
+
+ priv->eee_active = 1;
+ init_timer(&priv->eee_ctrl_timer);
+ priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
+ priv->eee_ctrl_timer.data = (unsigned long)priv;
+ priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
+ add_timer(&priv->eee_ctrl_timer);
+
+ priv->hw->mac->set_eee_timer(priv->ioaddr,
+ STMMAC_DEFAULT_LIT_LS_TIMER,
+ priv->tx_lpi_timer);
+
+ pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+
+ ret = true;
+ }
+out:
+ return ret;
+}
+
+static void stmmac_eee_adjust(struct stmmac_priv *priv)
+{
+ /* When EEE has already been initialised we have to modify the
+ * PLS bit in the LPI ctrl & status reg according to the PHY
+ * link status.
+ */
+ if (priv->eee_enabled)
+ priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+}
+
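Taken together, the helpers above implement a simple policy: ask the MAC to enter LPI only when the TX ring has fully drained, and force it back out as soon as new traffic is queued (stmmac_xmit) or the link state changes (stmmac_adjust_link). A stand-alone sketch of just the entry decision, with invented names for illustration (not kernel API):

	/* Sketch of the check in stmmac_enable_eee_mode(): request LPI entry
	 * only when the ring is drained and the TX path is not already in LPI. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool should_request_lpi(unsigned int cur_tx, unsigned int dirty_tx,
				       bool tx_path_in_lpi_mode)
	{
		return cur_tx == dirty_tx && !tx_path_in_lpi_mode;
	}

	int main(void)
	{
		printf("%d\n", should_request_lpi(4, 4, false));	/* 1: idle, enter LPI */
		printf("%d\n", should_request_lpi(5, 4, false));	/* 0: frames pending */
		return 0;
	}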
/**
* stmmac_adjust_link
* @dev: net device structure
@@ -249,6 +336,7 @@ static void stmmac_adjust_link(struct net_device *dev)
phydev->addr, phydev->link);
spin_lock_irqsave(&priv->lock, flags);
+
if (phydev->link) {
u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -315,6 +403,8 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
+ stmmac_eee_adjust(priv);
+
spin_unlock_irqrestore(&priv->lock, flags);
DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
@@ -332,7 +422,7 @@ static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
- char phy_id[MII_BUS_ID_SIZE + 3];
+ char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
priv->oldlink = 0;
@@ -346,11 +436,12 @@ static int stmmac_init_phy(struct net_device *dev)
snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
priv->plat->bus_id);
- snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
- pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
- phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
+ interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -677,7 +768,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->hw->desc->release_tx_desc(p);
- entry = (++priv->dirty_tx) % txsize;
+ priv->dirty_tx++;
}
if (unlikely(netif_queue_stopped(priv->dev) &&
stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
@@ -689,6 +780,11 @@ static void stmmac_tx(struct stmmac_priv *priv)
}
netif_tx_unlock(priv->dev);
}
+
+ if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+ }
spin_unlock(&priv->tx_lock);
}
@@ -833,8 +929,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
/**
* stmmac_selec_desc_mode
- * @dev : device pointer
- * Description: select the Enhanced/Alternate or Normal descriptors */
+ * @priv : private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors
+ */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
if (priv->plat->enh_desc) {
@@ -1026,6 +1123,17 @@ static int stmmac_open(struct net_device *dev)
}
}
+ /* Request the IRQ lines */
+ if (priv->lpi_irq != -ENXIO) {
+ ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ goto open_error_lpiirq;
+ }
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_set_mac(priv->ioaddr, true);
@@ -1061,12 +1169,19 @@ static int stmmac_open(struct net_device *dev)
if (priv->phydev)
phy_start(priv->phydev);
+ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
+ priv->eee_enabled = stmmac_eee_init(priv);
+
napi_enable(&priv->napi);
skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
return 0;
+open_error_lpiirq:
+ if (priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+
open_error_wolirq:
free_irq(dev->irq, dev);
@@ -1092,6 +1207,9 @@ static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ if (priv->eee_enabled)
+ del_timer_sync(&priv->eee_ctrl_timer);
+
/* Stop and disconnect the PHY */
if (priv->phydev) {
phy_stop(priv->phydev);
@@ -1114,6 +1232,8 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
if (priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
+ if (priv->lpi_irq != -ENXIO)
+ free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
priv->hw->dma->stop_tx(priv->ioaddr);
@@ -1163,6 +1283,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock(&priv->tx_lock);
+ if (priv->tx_path_in_lpi_mode)
+ stmmac_disable_eee_mode(priv);
+
entry = priv->cur_tx % txsize;
#ifdef STMMAC_XMIT_DEBUG
@@ -1211,6 +1334,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
wmb();
priv->hw->desc->set_tx_owner(desc);
+ wmb();
}
/* Interrupt on completion only for the latest segment */
@@ -1226,6 +1350,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* To avoid a race condition */
priv->hw->desc->set_tx_owner(first);
+ wmb();
priv->cur_tx++;
@@ -1289,6 +1414,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
wmb();
priv->hw->desc->set_rx_owner(p + entry);
+ wmb();
}
}
@@ -1307,7 +1433,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
display_ring(priv->dma_rx, rxsize);
}
#endif
- count = 0;
while (!priv->hw->desc->get_rx_owner(p)) {
int status;
@@ -1540,10 +1665,37 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- if (priv->plat->has_gmac)
- /* To handle GMAC own interrupts */
- priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
+ /* To handle GMAC own interrupts */
+ if (priv->plat->has_gmac) {
+ int status = priv->hw->mac->host_irq_status((void __iomem *)
+ dev->base_addr);
+ if (unlikely(status)) {
+ if (status & core_mmc_tx_irq)
+ priv->xstats.mmc_tx_irq_n++;
+ if (status & core_mmc_rx_irq)
+ priv->xstats.mmc_rx_irq_n++;
+ if (status & core_mmc_rx_csum_offload_irq)
+ priv->xstats.mmc_rx_csum_offload_irq_n++;
+ if (status & core_irq_receive_pmt_irq)
+ priv->xstats.irq_receive_pmt_irq_n++;
+
+ /* For LPI we need to save the tx status */
+ if (status & core_irq_tx_path_in_lpi_mode) {
+ priv->xstats.irq_tx_path_in_lpi_mode_n++;
+ priv->tx_path_in_lpi_mode = true;
+ }
+ if (status & core_irq_tx_path_exit_lpi_mode) {
+ priv->xstats.irq_tx_path_exit_lpi_mode_n++;
+ priv->tx_path_in_lpi_mode = false;
+ }
+ if (status & core_irq_rx_path_in_lpi_mode)
+ priv->xstats.irq_rx_path_in_lpi_mode_n++;
+ if (status & core_irq_rx_path_exit_lpi_mode)
+ priv->xstats.irq_rx_path_exit_lpi_mode_n++;
+ }
+ }
+ /* To handle DMA interrupts */
stmmac_dma_interrupt(priv);
return IRQ_HANDLED;
@@ -1861,6 +2013,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
/**
* stmmac_dvr_probe
* @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
*/
@@ -2090,6 +2244,34 @@ int stmmac_restore(struct net_device *ndev)
}
#endif /* CONFIG_PM */
+/* Driver can be configured w/ and w/o both PCI and Platform drivers
+ * depending on the configuration selected.
+ */
+static int __init stmmac_init(void)
+{
+ int err_plt = 0;
+ int err_pci = 0;
+
+ err_plt = stmmac_register_platform();
+ err_pci = stmmac_register_pci();
+
+ if ((err_pci) && (err_plt)) {
+ pr_err("stmmac: driver registration failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+ stmmac_unregister_platform();
+ stmmac_unregister_pci();
+}
+
+module_init(stmmac_init);
+module_exit(stmmac_exit);
+
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
@@ -2099,42 +2281,38 @@ static int __init stmmac_cmdline_opt(char *str)
return -EINVAL;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "debug:", 6)) {
- if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug))
+ if (kstrtoint(opt + 6, 0, &debug))
goto err;
} else if (!strncmp(opt, "phyaddr:", 8)) {
- if (strict_strtoul(opt + 8, 0,
- (unsigned long *)&phyaddr))
+ if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
} else if (!strncmp(opt, "dma_txsize:", 11)) {
- if (strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_txsize))
+ if (kstrtoint(opt + 11, 0, &dma_txsize))
goto err;
} else if (!strncmp(opt, "dma_rxsize:", 11)) {
- if (strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_rxsize))
+ if (kstrtoint(opt + 11, 0, &dma_rxsize))
goto err;
} else if (!strncmp(opt, "buf_sz:", 7)) {
- if (strict_strtoul(opt + 7, 0,
- (unsigned long *)&buf_sz))
+ if (kstrtoint(opt + 7, 0, &buf_sz))
goto err;
} else if (!strncmp(opt, "tc:", 3)) {
- if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc))
+ if (kstrtoint(opt + 3, 0, &tc))
goto err;
} else if (!strncmp(opt, "watchdog:", 9)) {
- if (strict_strtoul(opt + 9, 0,
- (unsigned long *)&watchdog))
+ if (kstrtoint(opt + 9, 0, &watchdog))
goto err;
} else if (!strncmp(opt, "flow_ctrl:", 10)) {
- if (strict_strtoul(opt + 10, 0,
- (unsigned long *)&flow_ctrl))
+ if (kstrtoint(opt + 10, 0, &flow_ctrl))
goto err;
} else if (!strncmp(opt, "pause:", 6)) {
- if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause))
+ if (kstrtoint(opt + 6, 0, &pause))
+ goto err;
+ } else if (!strncmp(opt, "eee_timer:", 6)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
#ifdef CONFIG_STMMAC_TIMER
} else if (!strncmp(opt, "tmrate:", 7)) {
- if (strict_strtoul(opt + 7, 0,
- (unsigned long *)&tmrate))
+ if (kstrtoint(opt + 7, 0, &tmrate))
goto err;
#endif
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 58fab53..13afb8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -125,7 +125,7 @@ err_out_req_reg_failed:
}
/**
- * stmmac_dvr_remove
+ * stmmac_pci_remove
*
* @pdev: platform device pointer
* Description: this function calls the main to free the net resources
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
MODULE_DEVICE_TABLE(pci, stmmac_id_table);
-static struct pci_driver stmmac_driver = {
+struct pci_driver stmmac_pci_driver = {
.name = STMMAC_RESOURCE_NAME,
.id_table = stmmac_id_table,
.probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
#endif
};
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
- int ret;
-
- ret = pci_register_driver(&stmmac_driver);
- if (ret < 0)
- pr_err("%s: ERROR: driver registration failed\n", __func__);
-
- return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
- pci_unregister_driver(&stmmac_driver);
-}
-
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3dd8f08..7d36163 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -156,6 +156,8 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
if (priv->wol_irq == -ENXIO)
priv->wol_irq = priv->dev->irq;
+ priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+
platform_set_drvdata(pdev, priv->dev);
pr_debug("STMMAC platform driver registration completed");
@@ -190,7 +192,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- iounmap((void *)priv->ioaddr);
+ iounmap((void __force __iomem *)priv->ioaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -255,7 +257,7 @@ static const struct of_device_id stmmac_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
-static struct platform_driver stmmac_driver = {
+struct platform_driver stmmac_pltfr_driver = {
.probe = stmmac_pltfr_probe,
.remove = stmmac_pltfr_remove,
.driver = {
@@ -266,8 +268,6 @@ static struct platform_driver stmmac_driver = {
},
};
-module_platform_driver(stmmac_driver);
-
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 703c8cc..8c726b7 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
struct netdev_queue *txq;
- unsigned int tx_bytes;
u16 pkt_cnt, tmp;
int cons, index;
u64 cs;
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
netif_printk(np, tx_done, KERN_DEBUG, np->dev,
"%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
- tx_bytes = 0;
- tmp = pkt_cnt;
- while (tmp--) {
- tx_bytes += rp->tx_buffs[cons].skb->len;
+ while (pkt_cnt--)
cons = release_tx_packet(np, rp, cons);
- }
rp->cons = cons;
smp_mb();
- netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
-
out:
if (unlikely(netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np)
struct tx_ring_info *rp = &np->tx_rings[i];
niu_free_tx_ring_info(np, rp);
- netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
}
kfree(np->tx_rings);
np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
prod = NEXT_TX(rp, prod);
}
- netdev_tx_sent_queue(txq, skb->len);
-
if (prod < rp->prod)
rp->wrap_bit ^= TX_RING_KICK_WRAP;
rp->prod = prod;
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 2a83fc5..967fe8c 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -233,7 +233,6 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
continue;
bp->rx_skbs[i] = skb;
- skb->dev = dev;
/* Because we reserve afterwards. */
skb_put(skb, ETH_FRAME_LEN);
@@ -838,7 +837,6 @@ static void bigmac_rx(struct bigmac *bp)
RX_BUF_ALLOC_SIZE - 34,
DMA_FROM_DEVICE);
bp->rx_skbs[elem] = new_skb;
- new_skb->dev = bp->dev;
skb_put(new_skb, ETH_FRAME_LEN);
skb_reserve(new_skb, 34);
this->rx_addr =
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3cf4ab7..9ae12d0 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -752,7 +752,6 @@ static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size
if (likely(skb)) {
unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
skb_reserve(skb, offset);
- skb->dev = dev;
}
return skb;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index dfc00c4..73f341b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1249,7 +1249,6 @@ static void happy_meal_clean_rings(struct happy_meal *hp)
static void happy_meal_init_rings(struct happy_meal *hp)
{
struct hmeal_init_block *hb = hp->happy_block;
- struct net_device *dev = hp->dev;
int i;
HMD(("happy_meal_init_rings: counters to zero, "));
@@ -1270,7 +1269,6 @@ static void happy_meal_init_rings(struct happy_meal *hp)
continue;
}
hp->rx_skbs[i] = skb;
- skb->dev = dev;
/* Because we reserve afterwards. */
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
@@ -2031,7 +2029,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
}
dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
hp->rx_skbs[elem] = new_skb;
- new_skb->dev = dev;
skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 7d4a040..aeded7f 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -441,7 +441,7 @@ static void qe_rx(struct sunqe *qep)
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
- skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+ skb_copy_to_linear_data(skb, this_qbuf,
len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 447a693..6ce9edd 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -137,14 +137,15 @@ static void print_eth_id(struct net_device *ndev)
#define bdx_disable_interrupts(priv) \
do { WRITE_REG(priv, regIMR, 0); } while (0)
-/* bdx_fifo_init
- * create TX/RX descriptor fifo for host-NIC communication.
+/**
+ * bdx_fifo_init - create TX/RX descriptor fifo for host-NIC communication.
+ * @priv: NIC private structure
+ * @f: fifo to initialize
+ * @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
+ * @reg_XXX: offsets of registers relative to base address
+ *
* 1K extra space is allocated at the end of the fifo to simplify
* processing of descriptors that wrap around the fifo's end
- * @priv - NIC private structure
- * @f - fifo to initialize
- * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
- * @reg_XXX - offsets of registers relative to base address
*
* Returns 0 on success, negative value on failure
*
@@ -177,9 +178,10 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
RET(0);
}
-/* bdx_fifo_free - free all resources used by fifo
- * @priv - NIC private structure
- * @f - fifo to release
+/**
+ * bdx_fifo_free - free all resources used by fifo
+ * @priv: NIC private structure
+ * @f: fifo to release
*/
static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
@@ -192,9 +194,9 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
RET();
}
-/*
+/**
* bdx_link_changed - notifies OS about hw link state.
- * @bdx_priv - hw adapter structure
+ * @priv: hw adapter structure
*/
static void bdx_link_changed(struct bdx_priv *priv)
{
@@ -233,10 +235,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
}
-/* bdx_isr - Interrupt Service Routine for Bordeaux NIC
- * @irq - interrupt number
- * @ndev - network device
- * @regs - CPU registers
+/**
+ * bdx_isr_napi - Interrupt Service Routine for Bordeaux NIC
+ * @irq: interrupt number
+ * @dev: network device
*
* Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise
*
@@ -307,8 +309,10 @@ static int bdx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-/* bdx_fw_load - loads firmware to NIC
- * @priv - NIC private structure
+/**
+ * bdx_fw_load - loads firmware to NIC
+ * @priv: NIC private structure
+ *
* Firmware is loaded via TXD fifo, so it must be initialized first.
 * Firmware must be loaded once per NIC, not per PCI device provided by the NIC (a NIC
 * can have a few of them). So all drivers use a semaphore register to choose one
@@ -380,8 +384,9 @@ static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
RET();
}
-/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines
- * @priv - NIC private structure
+/**
+ * bdx_hw_start - inits registers and starts HW's Rx and Tx engines
+ * @priv: NIC private structure
*/
static int bdx_hw_start(struct bdx_priv *priv)
{
@@ -691,12 +696,13 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
RET(-EOPNOTSUPP);
}
-/*
+/**
* __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
- * by passing VLAN filter table to hardware
- * @ndev network device
- * @vid VLAN vid
- * @op add or kill operation
+ * @ndev: network device
+ * @vid: VLAN vid
+ * @op: add or kill operation
+ *
+ * Passes VLAN filter table to hardware
*/
static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
{
@@ -722,10 +728,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
RET();
}
-/*
+/**
* bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
- * @ndev network device
- * @vid VLAN vid to add
+ * @ndev: network device
+ * @vid: VLAN vid to add
*/
static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
@@ -733,10 +739,10 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
return 0;
}
-/*
+/**
* bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
- * @ndev network device
- * @vid VLAN vid to kill
+ * @ndev: network device
+ * @vid: VLAN vid to kill
*/
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
@@ -974,8 +980,9 @@ static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
* Rx Init *
*************************************************************************/
-/* bdx_rx_init - initialize RX all related HW and SW resources
- * @priv - NIC private structure
+/**
+ * bdx_rx_init - initialize all RX-related HW and SW resources
+ * @priv: NIC private structure
*
* Returns 0 on success, negative value on failure
*
@@ -1016,9 +1023,10 @@ err_mem:
return -ENOMEM;
}
-/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
- * @priv - NIC private structure
- * @f - RXF fifo
+/**
+ * bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
+ * @priv: NIC private structure
+ * @f: RXF fifo
*/
static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
@@ -1045,8 +1053,10 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
}
}
-/* bdx_rx_free - release all Rx resources
- * @priv - NIC private structure
+/**
+ * bdx_rx_free - release all Rx resources
+ * @priv: NIC private structure
+ *
 * It assumes that Rx is disabled in HW
*/
static void bdx_rx_free(struct bdx_priv *priv)
@@ -1067,9 +1077,11 @@ static void bdx_rx_free(struct bdx_priv *priv)
* Rx Engine *
*************************************************************************/
-/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs
- * @priv - nic's private structure
- * @f - RXF fifo that needs skbs
+/**
+ * bdx_rx_alloc_skbs - fill rxf fifo with new skbs
+ * @priv: nic's private structure
+ * @f: RXF fifo that needs skbs
+ *
 * It allocates skbs, builds rxf descs and pushes them into the rxf fifo.
* skb's virtual and physical addresses are stored in skb db.
* To calculate free space, func uses cached values of RPTR and WPTR
@@ -1179,13 +1191,15 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
RET();
}
-/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
+/**
+ * bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
 * NOTE: special treatment is given to non-contiguous descriptors
 * that start near the end, wrap around and continue at the beginning. A second
 * part is copied right after the first, and then the descriptor is interpreted as
 * normal. The fifo has extra space to allow such operations.
- * @priv - nic's private structure
- * @f - RXF fifo that needs skbs
+ * @priv: nic's private structure
+ * @f: RXF fifo that needs skbs
+ * @budget: maximum number of packets to receive
*/
/* TBD: replace memcpy func call by explicit inline asm */
@@ -1375,9 +1389,10 @@ static inline int bdx_tx_db_size(struct txdb *db)
return db->size - taken;
}
-/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap
- * @d - tx data base
- * @ptr - read or write pointer
+/**
+ * __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
+ * @db: tx data base
+ * @pptr: read or write pointer
*/
static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
@@ -1394,8 +1409,9 @@ static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
*pptr = db->start;
}
-/* bdx_tx_db_inc_rptr - increment read pointer
- * @d - tx data base
+/**
+ * bdx_tx_db_inc_rptr - increment read pointer
+ * @db: tx data base
*/
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
@@ -1403,8 +1419,9 @@ static inline void bdx_tx_db_inc_rptr(struct txdb *db)
__bdx_tx_db_ptr_next(db, &db->rptr);
}
-/* bdx_tx_db_inc_rptr - increment write pointer
- * @d - tx data base
+/**
+ * bdx_tx_db_inc_wptr - increment write pointer
+ * @db: tx data base
*/
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
@@ -1413,9 +1430,11 @@ static inline void bdx_tx_db_inc_wptr(struct txdb *db)
a result of write */
}
-/* bdx_tx_db_init - creates and initializes tx db
- * @d - tx data base
- * @sz_type - size of tx fifo
+/**
+ * bdx_tx_db_init - creates and initializes tx db
+ * @d: tx data base
+ * @sz_type: size of tx fifo
+ *
* Returns 0 on success, error code otherwise
*/
static int bdx_tx_db_init(struct txdb *d, int sz_type)
@@ -1441,8 +1460,9 @@ static int bdx_tx_db_init(struct txdb *d, int sz_type)
return 0;
}
-/* bdx_tx_db_close - closes tx db and frees all memory
- * @d - tx data base
+/**
+ * bdx_tx_db_close - closes tx db and frees all memory
+ * @d: tx data base
*/
static void bdx_tx_db_close(struct txdb *d)
{
@@ -1463,9 +1483,11 @@ static struct {
u16 qwords; /* qword = 64 bit */
} txd_sizes[MAX_SKB_FRAGS + 1];
-/* txdb_map_skb - creates and stores dma mappings for skb's data blocks
- * @priv - NIC private structure
- * @skb - socket buffer to map
+/**
+ * bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks
+ * @priv: NIC private structure
+ * @skb: socket buffer to map
+ * @txdd: TX descriptor to use
*
* It makes dma mappings for skb's data blocks and writes them to PBL of
* new tx descriptor. It also stores them in the tx db, so they could be
@@ -1562,9 +1584,10 @@ err_mem:
return -ENOMEM;
}
-/*
+/**
* bdx_tx_space - calculates available space in TX fifo
- * @priv - NIC private structure
+ * @priv: NIC private structure
+ *
* Returns available space in TX fifo in bytes
*/
static inline int bdx_tx_space(struct bdx_priv *priv)
@@ -1579,9 +1602,10 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
return fsize;
}
-/* bdx_tx_transmit - send packet to NIC
- * @skb - packet to send
- * ndev - network device assigned to NIC
+/**
+ * bdx_tx_transmit - send packet to NIC
+ * @skb: packet to send
+ * @ndev: network device assigned to NIC
* Return codes:
* o NETDEV_TX_OK everything ok.
* o NETDEV_TX_BUSY Cannot transmit packet, try later
@@ -1699,8 +1723,10 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
- * @priv - bdx adapter
+/**
+ * bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
+ * @priv: bdx adapter
+ *
* It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
* that those packets were sent
*/
@@ -1761,7 +1787,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
spin_unlock(&priv->tx_lock);
}
-/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
+/**
+ * bdx_tx_free_skbs - frees all skbs from TXD fifo.
 * It gets called when the OS stops this dev, e.g. upon "ifconfig down" or rmmod
*/
static void bdx_tx_free_skbs(struct bdx_priv *priv)
@@ -1790,10 +1817,11 @@ static void bdx_tx_free(struct bdx_priv *priv)
bdx_tx_db_close(&priv->txdb);
}
-/* bdx_tx_push_desc - push descriptor to TxD fifo
- * @priv - NIC private structure
- * @data - desc's data
- * @size - desc's size
+/**
+ * bdx_tx_push_desc - push descriptor to TxD fifo
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
*
* Pushes desc to TxD fifo and overlaps it if needed.
 * NOTE: this func does not check for available space. This is the responsibility
@@ -1819,10 +1847,11 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
}
-/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
- * @priv - NIC private structure
- * @data - desc's data
- * @size - desc's size
+/**
+ * bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
*
* NOTE: this func does check for available space and, if necessary, waits for
* NIC to read existing data before writing new one.
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index d614c37..3b5c457 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4da93a5..ab0bbb7 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -375,7 +375,7 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
/**
- * emac_dump_regs: Dump important EMAC registers to debug terminal
+ * emac_dump_regs - Dump important EMAC registers to debug terminal
* @priv: The DaVinci EMAC private adapter structure
*
* Executes ethtool set cmd & sets phy mode
@@ -466,7 +466,7 @@ static void emac_dump_regs(struct emac_priv *priv)
}
/**
- * emac_get_drvinfo: Get EMAC driver information
+ * emac_get_drvinfo - Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
* @info: ethtool info structure containing name and version
*
@@ -481,7 +481,7 @@ static void emac_get_drvinfo(struct net_device *ndev,
}
/**
- * emac_get_settings: Get EMAC settings
+ * emac_get_settings - Get EMAC settings
* @ndev: The DaVinci EMAC network adapter
* @ecmd: ethtool command
*
@@ -500,7 +500,7 @@ static int emac_get_settings(struct net_device *ndev,
}
/**
- * emac_set_settings: Set EMAC settings
+ * emac_set_settings - Set EMAC settings
* @ndev: The DaVinci EMAC network adapter
* @ecmd: ethtool command
*
@@ -518,7 +518,7 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
}
/**
- * emac_get_coalesce : Get interrupt coalesce settings for this device
+ * emac_get_coalesce - Get interrupt coalesce settings for this device
* @ndev : The DaVinci EMAC network adapter
* @coal : ethtool coalesce settings structure
*
@@ -536,7 +536,7 @@ static int emac_get_coalesce(struct net_device *ndev,
}
/**
- * emac_set_coalesce : Set interrupt coalesce settings for this device
+ * emac_set_coalesce - Set interrupt coalesce settings for this device
* @ndev : The DaVinci EMAC network adapter
* @coal : ethtool coalesce settings structure
*
@@ -614,11 +614,9 @@ static int emac_set_coalesce(struct net_device *ndev,
}
-/**
- * ethtool_ops: DaVinci EMAC Ethtool structure
+/* ethtool_ops: DaVinci EMAC Ethtool structure
*
* Ethtool support for EMAC adapter
- *
*/
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
@@ -631,7 +629,7 @@ static const struct ethtool_ops ethtool_ops = {
};
/**
- * emac_update_phystatus: Update Phy status
+ * emac_update_phystatus - Update Phy status
* @priv: The DaVinci EMAC private adapter structure
*
* Updates phy status and takes action for network queue if required
@@ -697,7 +695,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
}
/**
- * hash_get: Calculate hash value from mac address
+ * hash_get - Calculate hash value from mac address
* @addr: mac address to delete from hash table
*
* Calculates hash value from mac address
@@ -723,9 +721,9 @@ static u32 hash_get(u8 *addr)
}
/**
- * hash_add: Hash function to add mac addr from hash table
+ * hash_add - Hash function to add mac addr from hash table
* @priv: The DaVinci EMAC private adapter structure
- * mac_addr: mac address to delete from hash table
+ * @mac_addr: mac address to delete from hash table
*
* Adds mac address to the internal hash table
*
@@ -765,9 +763,9 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
}
/**
- * hash_del: Hash function to delete mac addr from hash table
+ * hash_del - Hash function to delete mac addr from hash table
* @priv: The DaVinci EMAC private adapter structure
- * mac_addr: mac address to delete from hash table
+ * @mac_addr: mac address to delete from hash table
*
* Removes mac address from the internal hash table
*
@@ -807,7 +805,7 @@ static int hash_del(struct emac_priv *priv, u8 *mac_addr)
#define EMAC_ALL_MULTI_CLR 3
/**
- * emac_add_mcast: Set multicast address in the EMAC adapter (Internal)
+ * emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
* @priv: The DaVinci EMAC private adapter structure
* @action: multicast operation to perform
 * @mac_addr: mac address to set
@@ -855,7 +853,7 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
}
/**
- * emac_dev_mcast_set: Set multicast address in the EMAC adapter
+ * emac_dev_mcast_set - Set multicast address in the EMAC adapter
* @ndev: The DaVinci EMAC network adapter
*
* Set multicast addresses in EMAC adapter
@@ -901,7 +899,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
*************************************************************************/
/**
- * emac_int_disable: Disable EMAC module interrupt (from adapter)
+ * emac_int_disable - Disable EMAC module interrupt (from adapter)
* @priv: The DaVinci EMAC private adapter structure
*
* Disable EMAC interrupt on the adapter
@@ -931,7 +929,7 @@ static void emac_int_disable(struct emac_priv *priv)
}
/**
- * emac_int_enable: Enable EMAC module interrupt (from adapter)
+ * emac_int_enable - Enable EMAC module interrupt (from adapter)
* @priv: The DaVinci EMAC private adapter structure
*
* Enable EMAC interrupt on the adapter
@@ -967,7 +965,7 @@ static void emac_int_enable(struct emac_priv *priv)
}
/**
- * emac_irq: EMAC interrupt handler
+ * emac_irq - EMAC interrupt handler
* @irq: interrupt number
* @dev_id: EMAC network adapter data structure ptr
*
@@ -1060,7 +1058,7 @@ static void emac_tx_handler(void *token, int len, int status)
}
/**
- * emac_dev_xmit: EMAC Transmit function
+ * emac_dev_xmit - EMAC Transmit function
* @skb: SKB pointer
* @ndev: The DaVinci EMAC network adapter
*
@@ -1111,7 +1109,7 @@ fail_tx:
}
/**
- * emac_dev_tx_timeout: EMAC Transmit timeout function
+ * emac_dev_tx_timeout - EMAC Transmit timeout function
* @ndev: The DaVinci EMAC network adapter
*
* Called when system detects that a skb timeout period has expired
@@ -1138,7 +1136,7 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
}
/**
- * emac_set_type0addr: Set EMAC Type0 mac address
+ * emac_set_type0addr - Set EMAC Type0 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1165,7 +1163,7 @@ static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_set_type1addr: Set EMAC Type1 mac address
+ * emac_set_type1addr - Set EMAC Type1 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1187,7 +1185,7 @@ static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_set_type2addr: Set EMAC Type2 mac address
+ * emac_set_type2addr - Set EMAC Type2 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1213,7 +1211,7 @@ static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
}
/**
- * emac_setmac: Set mac address in the adapter (internal function)
+ * emac_setmac - Set mac address in the adapter (internal function)
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1242,7 +1240,7 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_dev_setmac_addr: Set mac address in the adapter
+ * emac_dev_setmac_addr - Set mac address in the adapter
* @ndev: The DaVinci EMAC network adapter
* @addr: MAC address to set in device
*
@@ -1277,7 +1275,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
}
/**
- * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
+ * emac_hw_enable - Enable EMAC hardware for packet transmission/reception
* @priv: The DaVinci EMAC private adapter structure
*
* Enables EMAC hardware for packet processing - enables PHY, enables RX
@@ -1347,7 +1345,7 @@ static int emac_hw_enable(struct emac_priv *priv)
}
/**
- * emac_poll: EMAC NAPI Poll function
+ * emac_poll - EMAC NAPI Poll function
* @ndev: The DaVinci EMAC network adapter
* @budget: Number of receive packets to process (as told by NAPI layer)
*
@@ -1430,7 +1428,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
- * emac_poll_controller: EMAC Poll controller function
+ * emac_poll_controller - EMAC Poll controller function
* @ndev: The DaVinci EMAC network adapter
*
* Polled functionality used by netconsole and others in non interrupt mode
@@ -1489,7 +1487,7 @@ static void emac_adjust_link(struct net_device *ndev)
*************************************************************************/
/**
- * emac_devioctl: EMAC adapter ioctl
+ * emac_devioctl - EMAC adapter ioctl
* @ndev: The DaVinci EMAC network adapter
* @ifrq: request parameter
* @cmd: command parameter
@@ -1516,7 +1514,7 @@ static int match_first_device(struct device *dev, void *data)
}
/**
- * emac_dev_open: EMAC device open
+ * emac_dev_open - EMAC device open
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to start the interface. We init TX/RX channels
@@ -1649,7 +1647,7 @@ rollback:
}
/**
- * emac_dev_stop: EMAC device stop
+ * emac_dev_stop - EMAC device stop
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to stop or down the interface. We stop the network
@@ -1691,7 +1689,7 @@ static int emac_dev_stop(struct net_device *ndev)
}
/**
- * emac_dev_getnetstats: EMAC get statistics function
+ * emac_dev_getnetstats - EMAC get statistics function
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to get statistics from the device.
@@ -1763,7 +1761,7 @@ static const struct net_device_ops emac_netdev_ops = {
};
/**
- * davinci_emac_probe: EMAC device probe
+ * davinci_emac_probe - EMAC device probe
 * @pdev: The DaVinci EMAC device that we are probing
*
 * Called when probing for emac devices. We get details of instances and
@@ -1949,7 +1947,7 @@ free_clk:
}
/**
- * davinci_emac_remove: EMAC device remove
+ * davinci_emac_remove - EMAC device remove
* @pdev: The DaVinci EMAC device that we are removing
*
* Called when removing the device driver. We disable clock usage and release
@@ -2015,9 +2013,7 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
.resume = davinci_emac_resume,
};
-/**
- * davinci_emac_driver: EMAC platform driver structure
- */
+/* davinci_emac_driver: EMAC platform driver structure */
static struct platform_driver davinci_emac_driver = {
.driver = {
.name = "davinci_emac",
@@ -2029,7 +2025,7 @@ static struct platform_driver davinci_emac_driver = {
};
/**
- * davinci_emac_init: EMAC driver module init
+ * davinci_emac_init - EMAC driver module init
*
* Called when initializing the driver. We register the driver with
* the platform.
@@ -2041,7 +2037,7 @@ static int __init davinci_emac_init(void)
late_initcall(davinci_emac_init);
/**
- * davinci_emac_exit: EMAC driver module exit
+ * davinci_emac_exit - EMAC driver module exit
*
* Called when exiting the driver completely. We unregister the driver with
* the platform and exit
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 2d9218f..098b1c4 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -7,6 +7,8 @@ config TILE_NET
depends on TILE
default y
select CRC32
+ select TILE_GXIO_MPIPE if TILEGX
+ select HIGH_RES_TIMERS if TILEGX
---help---
This is a standard Linux network device driver for the
on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f14..0ef9eef 100644
--- a/drivers/net/ethernet/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_TILE_NET) += tile_net.o
ifdef CONFIG_TILEGX
-tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+tile_net-y := tilegx.o
else
-tile_net-objs := tilepro.o
+tile_net-y := tilepro.o
endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644
index 0000000..83b4b38
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -0,0 +1,1898 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/io.h>
+#include <linux/ctype.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+#include <gxio/mpipe.h>
+#include <arch/sim.h>
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* The maximum number of distinct channels (idesc.channel is 5 bits). */
+#define TILE_NET_CHANNELS 32
+
+/* Maximum number of idescs to handle per "poll". */
+#define TILE_NET_BATCH 128
+
+/* Maximum number of packets to handle per "poll". */
+#define TILE_NET_WEIGHT 64
+
+/* Number of entries in each iqueue. */
+#define IQUEUE_ENTRIES 512
+
+/* Number of entries in each equeue. */
+#define EQUEUE_ENTRIES 2048
+
+/* Total header bytes per equeue slot. Must be big enough for 2 bytes
+ * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
+ * 60 bytes of actual TCP header. We round up to align to cache lines.
+ */
+#define HEADER_BYTES 128
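+/* Illustrative arithmetic for the value above (the 64-byte cache line
+ * size is an assumption here): 2 + 14 + 60 = 76 bytes of worst-case
+ * header, which rounds up to 128 when aligned to 64-byte cache lines.
+ */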
+
+/* Maximum completions per cpu per device (must be a power of two).
+ * ISSUE: What is the right number here? If this is too small, then
+ * egress might block waiting for free space in a completions array.
+ * ISSUE: At the least, allocate these only for initialized echannels.
+ */
+#define TILE_NET_MAX_COMPS 64
+
+#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+
+/* Size of completions data to allocate.
+ * ISSUE: Probably more than needed since we don't use all the channels.
+ */
+#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
+
+/* Size of NotifRing data to allocate. */
+#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
+
+/* Timeout to wake the per-device TX timer after we stop the queue.
+ * We don't want the timeout too short (adds overhead, and might end
+ * up causing stop/wake/stop/wake cycles) or too long (affects performance).
+ * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
+ */
+#define TX_TIMER_DELAY_USEC 30
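+/* Rough estimate behind the figure above (illustrative only): a
+ * 1500-byte packet is about 12,000 bits, i.e. roughly 1.2 usec on a
+ * 10 Gb link, so a 30 usec window covers a few tens of full-sized packets.
+ */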
+
+/* Timeout to wake the per-cpu egress timer to free completions. */
+#define EGRESS_TIMER_DELAY_USEC 1000
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+/* A "packet fragment" (a chunk of memory). */
+struct frag {
+ void *buf;
+ size_t length;
+};
+
+/* A single completion. */
+struct tile_net_comp {
+ /* The "complete_count" when the completion will be complete. */
+ s64 when;
+ /* The buffer to be freed when the completion is complete. */
+ struct sk_buff *skb;
+};
+
+/* The completions for a given cpu and echannel. */
+struct tile_net_comps {
+ /* The completions. */
+ struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
+ /* The number of completions used. */
+ unsigned long comp_next;
+ /* The number of completions freed. */
+ unsigned long comp_last;
+};
+
+/* The transmit wake timer for a given cpu and echannel. */
+struct tile_net_tx_wake {
+ struct hrtimer timer;
+ struct net_device *dev;
+};
+
+/* Info for a specific cpu. */
+struct tile_net_info {
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Packet queue. */
+ gxio_mpipe_iqueue_t iqueue;
+ /* Our cpu. */
+ int my_cpu;
+ /* True if iqueue is valid. */
+ bool has_iqueue;
+ /* NAPI flags. */
+ bool napi_added;
+ bool napi_enabled;
+ /* Number of small sk_buffs which must still be provided. */
+ unsigned int num_needed_small_buffers;
+ /* Number of large sk_buffs which must still be provided. */
+ unsigned int num_needed_large_buffers;
+ /* A timer for handling egress completions. */
+ struct hrtimer egress_timer;
+ /* True if "egress_timer" is scheduled. */
+ bool egress_timer_scheduled;
+ /* Comps for each egress channel. */
+ struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+ /* Transmit wake timer for each egress channel. */
+ struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+};
+
+/* Info for egress on a particular egress channel. */
+struct tile_net_egress {
+ /* The "equeue". */
+ gxio_mpipe_equeue_t *equeue;
+ /* The headers for TSO. */
+ unsigned char *headers;
+};
+
+/* Info for a specific device. */
+struct tile_net_priv {
+ /* Our network device. */
+ struct net_device *dev;
+ /* The primary link. */
+ gxio_mpipe_link_t link;
+ /* The primary channel, if open, else -1. */
+ int channel;
+ /* The "loopify" egress link, if needed. */
+ gxio_mpipe_link_t loopify_link;
+ /* The "loopify" egress channel, if open, else -1. */
+ int loopify_channel;
+ /* The egress channel (channel or loopify_channel). */
+ int echannel;
+ /* Total stats. */
+ struct net_device_stats stats;
+};
+
+/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
+static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+
+/* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+/* A mutex for "tile_net_devs_for_channel". */
+static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
+
+/* The per-cpu info. */
+static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
+
+/* The "context" for all devices. */
+static gxio_mpipe_context_t context;
+
+/* Buffer sizes and mpipe enum codes for buffer stacks.
+ * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ */
+#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
+#define BUFFER_SIZE_SMALL 128
+#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
+#define BUFFER_SIZE_LARGE 1664
+
+/* The small/large "buffer stacks". */
+static int small_buffer_stack = -1;
+static int large_buffer_stack = -1;
+
+/* Amount of memory allocated for each buffer stack. */
+static size_t buffer_stack_size;
+
+/* The actual memory allocated for the buffer stacks. */
+static void *small_buffer_stack_va;
+static void *large_buffer_stack_va;
+
+/* The buckets. */
+static int first_bucket = -1;
+static int num_buckets = 1;
+
+/* The ingress irq. */
+static int ingress_irq = -1;
+
+/* Text value of tile_net.cpus if passed as a module parameter. */
+static char *network_cpus_string;
+
+/* The actual cpus in "network_cpus". */
+static struct cpumask network_cpus_map;
+
+/* If "loopify=LINK" was specified, this is "LINK". */
+static char *loopify_link_name;
+
+/* If "tile_net.custom" was specified, this is non-NULL. */
+static char *custom_str;
+
+/* The "tile_net.cpus" argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
+ * m, n, x, y are integer numbers that represent the cpus that can be
+ * neither a dedicated cpu nor a dataplane cpu.
+ */
+static bool network_cpus_init(void)
+{
+ char buf[1024];
+ int rc;
+
+ if (network_cpus_string == NULL)
+ return false;
+
+ rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
+ if (rc != 0) {
+ pr_warn("tile_net.cpus=%s: malformed cpu list\n",
+ network_cpus_string);
+ return false;
+ }
+
+ /* Remove dedicated cpus. */
+ cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
+
+ if (cpumask_empty(&network_cpus_map)) {
+ pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
+ network_cpus_string);
+ return false;
+ }
+
+ cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+ pr_info("Linux network CPUs: %s\n", buf);
+ return true;
+}
+
+module_param_named(cpus, network_cpus_string, charp, 0444);
+MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
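+/* As a hypothetical example (the cpu numbers are placeholders), booting
+ * with "tile_net.cpus=1-3,6-7" would ask cpus 1, 2, 3, 6 and 7 to handle
+ * network interrupts, provided those cpus are actually available.
+ */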
+
+/* The "tile_net.loopify=LINK" argument causes the named device to
+ * actually use "loop0" for ingress, and "loop1" for egress. This
+ * allows an app to sit between the actual link and linux, passing
+ * (some) packets along to linux, and forwarding (some) packets sent
+ * out by linux.
+ */
+module_param_named(loopify, loopify_link_name, charp, 0444);
+MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
+
+/* The "tile_net.custom" argument causes us to ignore the "conventional"
+ * classifier metadata, in particular, the "l2_offset".
+ */
+module_param_named(custom, custom_str, charp, 0444);
+MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
+
+/* Atomically update a statistics field.
+ * Note that on TILE-Gx, this operation is fire-and-forget on the
+ * issuing core (single-cycle dispatch) and takes only a few cycles
+ * longer than a regular store when the request reaches the home cache.
+ * No expensive bus management overhead is required.
+ */
+static void tile_net_stats_add(unsigned long value, unsigned long *field)
+{
+ BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
+ atomic_long_add(value, (atomic_long_t *)field);
+}
+
+/* Allocate and push a buffer. */
+static bool tile_net_provide_buffer(bool small)
+{
+ int stack = small ? small_buffer_stack : large_buffer_stack;
+ const unsigned long buffer_alignment = 128;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(struct sk_buff **) + buffer_alignment;
+ len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+ skb = dev_alloc_skb(len);
+ if (skb == NULL)
+ return false;
+
+ /* Make room for a back-pointer to 'skb' and guarantee alignment. */
+ skb_reserve(skb, sizeof(struct sk_buff **));
+ skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
+
+ /* Save a back-pointer to 'skb'. */
+ *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
+
+ /* Make sure "skb" and the back-pointer have been flushed. */
+ wmb();
+
+ gxio_mpipe_push_buffer(&context, stack,
+ (void *)va_to_tile_io_addr(skb->data));
+
+ return true;
+}
+
+/* Convert a raw mpipe buffer to its matching skb pointer. */
+static struct sk_buff *mpipe_buf_to_skb(void *va)
+{
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ /* Paranoia. */
+ if (skb->data != va) {
+ /* Panic here since there's a reasonable chance
+ * that corrupt buffers means generic memory
+ * corruption, with unpredictable system effects.
+ */
+ panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
+ va, skb, skb->data);
+ }
+
+ return skb;
+}
+
+static void tile_net_pop_all_buffers(int stack)
+{
+ for (;;) {
+ tile_io_addr_t addr =
+ (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+ if (addr == 0)
+ break;
+ dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
+ }
+}
+
+/* Provide linux buffers to mPIPE. */
+static void tile_net_provide_needed_buffers(void)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+ while (info->num_needed_small_buffers != 0) {
+ if (!tile_net_provide_buffer(true))
+ goto oops;
+ info->num_needed_small_buffers--;
+ }
+
+ while (info->num_needed_large_buffers != 0) {
+ if (!tile_net_provide_buffer(false))
+ goto oops;
+ info->num_needed_large_buffers--;
+ }
+
+ return;
+
+oops:
+ /* Add a description to the page allocation failure dump. */
+ pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+}
+
+static inline bool filter_packet(struct net_device *dev, void *buf)
+{
+ /* Filter packets received before we're up. */
+ if (dev == NULL || !(dev->flags & IFF_UP))
+ return true;
+
+ /* Filter out packets that aren't for us. */
+ if (!(dev->flags & IFF_PROMISC) &&
+ !is_multicast_ether_addr(buf) &&
+ compare_ether_addr(dev->dev_addr, buf) != 0)
+ return true;
+
+ return false;
+}
+
+static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
+ gxio_mpipe_idesc_t *idesc, unsigned long len)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ /* Encode the actual packet length. */
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Acknowledge "good" hardware checksums. */
+ if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb);
+
+ /* Update stats. */
+ tile_net_stats_add(1, &priv->stats.rx_packets);
+ tile_net_stats_add(len, &priv->stats.rx_bytes);
+
+ /* Need a new buffer. */
+ if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
+ info->num_needed_small_buffers++;
+ else
+ info->num_needed_large_buffers++;
+}
+
+/* Handle a packet. Return true if "processed", false if "filtered". */
+static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+ uint8_t l2_offset;
+ void *va;
+ void *buf;
+ unsigned long len;
+ bool filter;
+
+ /* Drop packets for which no buffer was available.
+ * NOTE: This happens under heavy load.
+ */
+ if (idesc->be) {
+ struct tile_net_priv *priv = netdev_priv(dev);
+ tile_net_stats_add(1, &priv->stats.rx_dropped);
+ gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+ if (net_ratelimit())
+ pr_info("Dropping packet (insufficient buffers).\n");
+ return false;
+ }
+
+ /* Get the "l2_offset", if allowed. */
+ l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+
+ /* Get the raw buffer VA (includes "headroom"). */
+ va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+
+ /* Get the actual packet start/length. */
+ buf = va + l2_offset;
+ len = idesc->l2_size - l2_offset;
+
+ /* Point "va" at the raw buffer. */
+ va -= NET_IP_ALIGN;
+
+ filter = filter_packet(dev, buf);
+ if (filter) {
+ gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+ } else {
+ struct sk_buff *skb = mpipe_buf_to_skb(va);
+
+ /* Skip headroom, and any custom header. */
+ skb_reserve(skb, NET_IP_ALIGN + l2_offset);
+
+ tile_net_receive_skb(dev, skb, idesc, len);
+ }
+
+ gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+ return !filter;
+}
+
+/* Handle some packets for the current CPU.
+ *
+ * This function handles up to TILE_NET_BATCH idescs per call.
+ *
+ * ISSUE: Since we do not provide new buffers until this function is
+ * complete, we must initially provide enough buffers for each network
+ * cpu to fill its iqueue and also its batched idescs.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ unsigned int work = 0;
+ gxio_mpipe_idesc_t *idesc;
+ int i, n;
+
+ /* Process packets. */
+ while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+ for (i = 0; i < n; i++) {
+ if (i == TILE_NET_BATCH)
+ goto done;
+ if (tile_net_handle_packet(idesc + i)) {
+ if (++work >= budget)
+ goto done;
+ }
+ }
+ }
+
+ /* There are no packets left. */
+ napi_complete(&info->napi);
+
+ /* Re-enable hypervisor interrupts. */
+ gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+
+ /* HACK: Avoid the "rotting packet" problem. */
+ if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
+ napi_schedule(&info->napi);
+
+ /* ISSUE: Handle completions? */
+
+done:
+ tile_net_provide_needed_buffers();
+
+ return work;
+}
+
+/* Handle an ingress interrupt on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ napi_schedule(&info->napi);
+ return IRQ_HANDLED;
+}
+
+/* Free some completions. This must be called with interrupts blocked. */
+static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
+ struct tile_net_comps *comps,
+ int limit, bool force_update)
+{
+ int n = 0;
+ while (comps->comp_last < comps->comp_next) {
+ unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
+ struct tile_net_comp *comp = &comps->comp_queue[cid];
+ if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
+ force_update || n == 0))
+ break;
+ dev_kfree_skb_irq(comp->skb);
+ comps->comp_last++;
+ if (++n == limit)
+ break;
+ }
+ return n;
+}
+
+/* Add a completion. This must be called with interrupts blocked.
+ * tile_net_equeue_try_reserve() will have ensured a free completion entry.
+ */
+static void add_comp(gxio_mpipe_equeue_t *equeue,
+ struct tile_net_comps *comps,
+ uint64_t when, struct sk_buff *skb)
+{
+ int cid = comps->comp_next % TILE_NET_MAX_COMPS;
+ comps->comp_queue[cid].when = when;
+ comps->comp_queue[cid].skb = skb;
+ comps->comp_next++;
+}
+
+static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ hrtimer_start(&info->tx_wake[priv->echannel].timer,
+ ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
+{
+ struct tile_net_tx_wake *tx_wake =
+ container_of(t, struct tile_net_tx_wake, timer);
+ netif_wake_subqueue(tx_wake->dev, smp_processor_id());
+ return HRTIMER_NORESTART;
+}
+
+/* Make sure the egress timer is scheduled. */
+static void tile_net_schedule_egress_timer(void)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+ if (!info->egress_timer_scheduled) {
+ hrtimer_start(&info->egress_timer,
+ ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+ HRTIMER_MODE_REL_PINNED);
+ info->egress_timer_scheduled = true;
+ }
+}
+
+/* The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected for this tile.
+ */
+static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ unsigned long irqflags;
+ bool pending = false;
+ int i;
+
+ local_irq_save(irqflags);
+
+ /* The timer is no longer scheduled. */
+ info->egress_timer_scheduled = false;
+
+ /* Free all possible comps for this tile. */
+ for (i = 0; i < TILE_NET_CHANNELS; i++) {
+ struct tile_net_egress *egress = &egress_for_echannel[i];
+ struct tile_net_comps *comps = info->comps_for_echannel[i];
+ if (comps->comp_last >= comps->comp_next)
+ continue;
+ tile_net_free_comps(egress->equeue, comps, -1, true);
+ pending = pending || (comps->comp_last < comps->comp_next);
+ }
+
+ /* Reschedule timer if needed. */
+ if (pending)
+ tile_net_schedule_egress_timer();
+
+ local_irq_restore(irqflags);
+
+ return HRTIMER_NORESTART;
+}
+
+/* Helper function for "tile_net_update()".
+ * "dev" (i.e. arg) is the device being brought up or down,
+ * or NULL if all devices are now down.
+ */
+static void tile_net_update_cpu(void *arg)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct net_device *dev = arg;
+
+ if (!info->has_iqueue)
+ return;
+
+ if (dev != NULL) {
+ if (!info->napi_added) {
+ netif_napi_add(dev, &info->napi,
+ tile_net_poll, TILE_NET_WEIGHT);
+ info->napi_added = true;
+ }
+ if (!info->napi_enabled) {
+ napi_enable(&info->napi);
+ info->napi_enabled = true;
+ }
+ enable_percpu_irq(ingress_irq, 0);
+ } else {
+ disable_percpu_irq(ingress_irq);
+ if (info->napi_enabled) {
+ napi_disable(&info->napi);
+ info->napi_enabled = false;
+ }
+ /* FIXME: Drain the iqueue. */
+ }
+}
+
+/* Helper function for tile_net_open() and tile_net_stop().
+ * Always called under tile_net_devs_for_channel_mutex.
+ */
+static int tile_net_update(struct net_device *dev)
+{
+ static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
+ bool saw_channel = false;
+ int channel;
+ int rc;
+ int cpu;
+
+ gxio_mpipe_rules_init(&rules, &context);
+
+ for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
+ if (tile_net_devs_for_channel[channel] == NULL)
+ continue;
+ if (!saw_channel) {
+ saw_channel = true;
+ gxio_mpipe_rules_begin(&rules, first_bucket,
+ num_buckets, NULL);
+ gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
+ }
+ gxio_mpipe_rules_add_channel(&rules, channel);
+ }
+
+ /* NOTE: This can fail if there is no classifier.
+ * ISSUE: Can anything else cause it to fail?
+ */
+ rc = gxio_mpipe_rules_commit(&rules);
+ if (rc != 0) {
+ netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, tile_net_update_cpu,
+ (saw_channel ? dev : NULL), 1);
+
+ /* HACK: Allow packets to flow in the simulator. */
+ if (saw_channel)
+ sim_enable_mpipe_links(0, -1);
+
+ return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for both small and large packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+{
+ pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
+ int rc;
+
+ /* Compute stack bytes; we round up to 64KB and then use
+ * alloc_pages() so we get the required 64KB alignment as well.
+ */
+ buffer_stack_size =
+ ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
+ 64 * 1024);
+
+ /* Allocate two buffer stack indices. */
+ rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
+ rc);
+ return rc;
+ }
+ small_buffer_stack = rc;
+ large_buffer_stack = rc + 1;
+
+ /* Allocate the small memory stack. */
+ small_buffer_stack_va =
+ alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+ if (small_buffer_stack_va == NULL) {
+ netdev_err(dev,
+ "Could not alloc %zd bytes for buffer stacks\n",
+ buffer_stack_size);
+ return -ENOMEM;
+ }
+ rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
+ BUFFER_SIZE_SMALL_ENUM,
+ small_buffer_stack_va,
+ buffer_stack_size, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+ return rc;
+ }
+ rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+ hash_pte, 0);
+ if (rc != 0) {
+ netdev_err(dev,
+ "gxio_mpipe_register_buffer_memory failed: %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Allocate the large buffer stack. */
+ large_buffer_stack_va =
+ alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+ if (large_buffer_stack_va == NULL) {
+ netdev_err(dev,
+ "Could not alloc %zd bytes for buffer stacks\n",
+ buffer_stack_size);
+ return -ENOMEM;
+ }
+ rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
+ BUFFER_SIZE_LARGE_ENUM,
+ large_buffer_stack_va,
+ buffer_stack_size, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
+ rc);
+ return rc;
+ }
+ rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
+ hash_pte, 0);
+ if (rc != 0) {
+ netdev_err(dev,
+ "gxio_mpipe_register_buffer_memory failed: %d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Allocate per-cpu resources (memory for completions and idescs).
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int alloc_percpu_mpipe_resources(struct net_device *dev,
+ int cpu, int ring)
+{
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ int order, i, rc;
+ struct page *page;
+ void *addr;
+
+ /* Allocate the "comps". */
+ order = get_order(COMPS_SIZE);
+ page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+ if (page == NULL) {
+ netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
+ COMPS_SIZE);
+ return -ENOMEM;
+ }
+ addr = pfn_to_kaddr(page_to_pfn(page));
+ memset(addr, 0, COMPS_SIZE);
+ for (i = 0; i < TILE_NET_CHANNELS; i++)
+ info->comps_for_echannel[i] =
+ addr + i * sizeof(struct tile_net_comps);
+
+ /* If this is a network cpu, create an iqueue. */
+ if (cpu_isset(cpu, network_cpus_map)) {
+ order = get_order(NOTIF_RING_SIZE);
+ page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+ if (page == NULL) {
+ netdev_err(dev,
+ "Failed to alloc %zd bytes iqueue memory\n",
+ NOTIF_RING_SIZE);
+ return -ENOMEM;
+ }
+ addr = pfn_to_kaddr(page_to_pfn(page));
+ rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
+ addr, NOTIF_RING_SIZE, 0);
+ if (rc < 0) {
+ netdev_err(dev,
+ "gxio_mpipe_iqueue_init failed: %d\n", rc);
+ return rc;
+ }
+ info->has_iqueue = true;
+ }
+
+ return ring;
+}
+
+/* Initialize NotifGroup and buckets.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_notif_group_and_buckets(struct net_device *dev,
+ int ring, int network_cpus_count)
+{
+ int group, rc;
+
+ /* Allocate one NotifGroup. */
+ rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
+ rc);
+ return rc;
+ }
+ group = rc;
+
+ /* Initialize global num_buckets value. */
+ if (network_cpus_count > 4)
+ num_buckets = 256;
+ else if (network_cpus_count > 1)
+ num_buckets = 16;
+
+ /* Allocate some buckets, and set global first_bucket value. */
+ rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+ return rc;
+ }
+ first_bucket = rc;
+
+ /* Init group and buckets. */
+ rc = gxio_mpipe_init_notif_group_and_buckets(
+ &context, group, ring, network_cpus_count,
+ first_bucket, num_buckets,
+ GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
+ if (rc != 0) {
+ netdev_err(
+ dev,
+ "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Create an irq and register it, then activate the irq and request
+ * interrupts on all cores. Note that "ingress_irq" being initialized
+ * is how we know not to call tile_net_init_mpipe() again.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int tile_net_setup_interrupts(struct net_device *dev)
+{
+ int cpu, rc;
+
+ rc = create_irq();
+ if (rc < 0) {
+ netdev_err(dev, "create_irq failed: %d\n", rc);
+ return rc;
+ }
+ ingress_irq = rc;
+ tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
+ rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
+ 0, NULL, NULL);
+ if (rc != 0) {
+ netdev_err(dev, "request_irq failed: %d\n", rc);
+ destroy_irq(ingress_irq);
+ ingress_irq = -1;
+ return rc;
+ }
+
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ if (info->has_iqueue) {
+ gxio_mpipe_request_notif_ring_interrupt(
+ &context, cpu_x(cpu), cpu_y(cpu),
+ 1, ingress_irq, info->iqueue.ring);
+ }
+ }
+
+ return 0;
+}
+
+/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
+static void tile_net_init_mpipe_fail(void)
+{
+ int cpu;
+
+ /* Do cleanups that require the mpipe context first. */
+ if (small_buffer_stack >= 0)
+ tile_net_pop_all_buffers(small_buffer_stack);
+ if (large_buffer_stack >= 0)
+ tile_net_pop_all_buffers(large_buffer_stack);
+
+ /* Destroy mpipe context so the hardware no longer owns any memory. */
+ gxio_mpipe_destroy(&context);
+
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ free_pages((unsigned long)(info->comps_for_echannel[0]),
+ get_order(COMPS_SIZE));
+ info->comps_for_echannel[0] = NULL;
+ free_pages((unsigned long)(info->iqueue.idescs),
+ get_order(NOTIF_RING_SIZE));
+ info->iqueue.idescs = NULL;
+ }
+
+ if (small_buffer_stack_va)
+ free_pages_exact(small_buffer_stack_va, buffer_stack_size);
+ if (large_buffer_stack_va)
+ free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+
+ small_buffer_stack_va = NULL;
+ large_buffer_stack_va = NULL;
+ large_buffer_stack = -1;
+ small_buffer_stack = -1;
+ first_bucket = -1;
+}
+
+/* The first time any tilegx network device is opened, we initialize
+ * the global mpipe state. If this step fails, we fail to open the
+ * device, but if it succeeds, we never need to do it again, and since
+ * tile_net can't be unloaded, we never undo it.
+ *
+ * Note that some resources in this path (buffer stack indices,
+ * bindings from init_buffer_stack, etc.) are hypervisor resources
+ * that are freed implicitly by gxio_mpipe_destroy().
+ */
+static int tile_net_init_mpipe(struct net_device *dev)
+{
+ int i, num_buffers, rc;
+ int cpu;
+ int first_ring, ring;
+ int network_cpus_count = cpus_weight(network_cpus_map);
+
+ if (!hash_default) {
+ netdev_err(dev, "Networking requires hash_default!\n");
+ return -EIO;
+ }
+
+ rc = gxio_mpipe_init(&context, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Set up the buffer stacks. */
+ num_buffers =
+ network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+ rc = init_buffer_stacks(dev, num_buffers);
+ if (rc != 0)
+ goto fail;
+
+ /* Provide initial buffers. */
+ rc = -ENOMEM;
+ for (i = 0; i < num_buffers; i++) {
+ if (!tile_net_provide_buffer(true)) {
+ netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+ goto fail;
+ }
+ }
+ for (i = 0; i < num_buffers; i++) {
+ if (!tile_net_provide_buffer(false)) {
+ netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+ goto fail;
+ }
+ }
+
+ /* Allocate one NotifRing for each network cpu. */
+ rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
+ rc);
+ goto fail;
+ }
+
+ /* Init NotifRings per-cpu. */
+ first_ring = rc;
+ ring = first_ring;
+ for_each_online_cpu(cpu) {
+ rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
+ if (rc < 0)
+ goto fail;
+ ring = rc;
+ }
+
+ /* Initialize NotifGroup and buckets. */
+ rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
+ if (rc != 0)
+ goto fail;
+
+ /* Create and enable interrupts. */
+ rc = tile_net_setup_interrupts(dev);
+ if (rc != 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ tile_net_init_mpipe_fail();
+ return rc;
+}
+
+/* Create persistent egress info for a given egress channel.
+ * Note that this may be shared between, say, "gbe0" and "xgbe0".
+ * ISSUE: Defer header allocation until TSO is actually needed?
+ */
+static int tile_net_init_egress(struct net_device *dev, int echannel)
+{
+ struct page *headers_page, *edescs_page, *equeue_page;
+ gxio_mpipe_edesc_t *edescs;
+ gxio_mpipe_equeue_t *equeue;
+ unsigned char *headers;
+ int headers_order, edescs_order, equeue_order;
+ size_t edescs_size;
+ int edma;
+ int rc = -ENOMEM;
+
+ /* Only initialize once. */
+ if (egress_for_echannel[echannel].equeue != NULL)
+ return 0;
+
+ /* Allocate memory for the "headers". */
+ headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
+ headers_page = alloc_pages(GFP_KERNEL, headers_order);
+ if (headers_page == NULL) {
+ netdev_warn(dev,
+ "Could not alloc %zd bytes for TSO headers.\n",
+ PAGE_SIZE << headers_order);
+ goto fail;
+ }
+ headers = pfn_to_kaddr(page_to_pfn(headers_page));
+
+ /* Allocate memory for the "edescs". */
+ edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
+ edescs_order = get_order(edescs_size);
+ edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
+ if (edescs_page == NULL) {
+ netdev_warn(dev,
+ "Could not alloc %zd bytes for eDMA ring.\n",
+ edescs_size);
+ goto fail_headers;
+ }
+ edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
+
+ /* Allocate memory for the "equeue". */
+ equeue_order = get_order(sizeof(*equeue));
+ equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
+ if (equeue_page == NULL) {
+ netdev_warn(dev,
+ "Could not alloc %zd bytes for equeue info.\n",
+ PAGE_SIZE << equeue_order);
+ goto fail_edescs;
+ }
+ equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
+
+ /* Allocate an edma ring. Note that in practice this can't
+ * fail, which is good, because we will leak an edma ring if so.
+ */
+ rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+ if (rc < 0) {
+ netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
+ rc);
+ goto fail_equeue;
+ }
+ edma = rc;
+
+ /* Initialize the equeue. */
+ rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+ edescs, edescs_size, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+ goto fail_equeue;
+ }
+
+ /* Done. */
+ egress_for_echannel[echannel].equeue = equeue;
+ egress_for_echannel[echannel].headers = headers;
+ return 0;
+
+fail_equeue:
+ __free_pages(equeue_page, equeue_order);
+
+fail_edescs:
+ __free_pages(edescs_page, edescs_order);
+
+fail_headers:
+ __free_pages(headers_page, headers_order);
+
+fail:
+ return rc;
+}
+
+/* Return channel number for a newly-opened link. */
+static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
+ const char *link_name)
+{
+ int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+ if (rc < 0) {
+ netdev_err(dev, "Failed to open '%s'\n", link_name);
+ return rc;
+ }
+ rc = gxio_mpipe_link_channel(link);
+ if (rc < 0 || rc >= TILE_NET_CHANNELS) {
+ netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
+ gxio_mpipe_link_close(link);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+/* Help the kernel activate the given network interface. */
+static int tile_net_open(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int cpu, rc;
+
+ mutex_lock(&tile_net_devs_for_channel_mutex);
+
+ /* Do one-time initialization the first time any device is opened. */
+ if (ingress_irq < 0) {
+ rc = tile_net_init_mpipe(dev);
+ if (rc != 0)
+ goto fail;
+ }
+
+ /* Determine if this is the "loopify" device. */
+ if (unlikely((loopify_link_name != NULL) &&
+ !strcmp(dev->name, loopify_link_name))) {
+ rc = tile_net_link_open(dev, &priv->link, "loop0");
+ if (rc < 0)
+ goto fail;
+ priv->channel = rc;
+ rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
+ if (rc < 0)
+ goto fail;
+ priv->loopify_channel = rc;
+ priv->echannel = rc;
+ } else {
+ rc = tile_net_link_open(dev, &priv->link, dev->name);
+ if (rc < 0)
+ goto fail;
+ priv->channel = rc;
+ priv->echannel = rc;
+ }
+
+ /* Initialize egress info (if needed). Once ever, per echannel. */
+ rc = tile_net_init_egress(dev, priv->echannel);
+ if (rc != 0)
+ goto fail;
+
+ tile_net_devs_for_channel[priv->channel] = dev;
+
+ rc = tile_net_update(dev);
+ if (rc != 0)
+ goto fail;
+
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+ /* Initialize the transmit wake timer for this device on each cpu. */
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ struct tile_net_tx_wake *tx_wake =
+ &info->tx_wake[priv->echannel];
+
+ hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ tx_wake->timer.function = tile_net_handle_tx_wake_timer;
+ tx_wake->dev = dev;
+ }
+
+ for_each_online_cpu(cpu)
+ netif_start_subqueue(dev, cpu);
+ netif_carrier_on(dev);
+ return 0;
+
+fail:
+ if (priv->loopify_channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+ netdev_warn(dev, "Failed to close loopify link!\n");
+ priv->loopify_channel = -1;
+ }
+ if (priv->channel >= 0) {
+ tile_net_devs_for_channel[priv->channel] = NULL;
+ if (gxio_mpipe_link_close(&priv->link) != 0)
+ netdev_warn(dev, "Failed to close link!\n");
+ priv->channel = -1;
+ }
+ priv->echannel = -1;
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+ /* Don't return raw gxio error codes to generic Linux. */
+ return (rc > -512) ? rc : -EIO;
+}
+
+/* Help the kernel deactivate the given network interface. */
+static int tile_net_stop(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ struct tile_net_tx_wake *tx_wake =
+ &info->tx_wake[priv->echannel];
+
+ hrtimer_cancel(&tx_wake->timer);
+ netif_stop_subqueue(dev, cpu);
+ }
+
+ mutex_lock(&tile_net_devs_for_channel_mutex);
+ tile_net_devs_for_channel[priv->channel] = NULL;
+ (void)tile_net_update(dev);
+ if (priv->loopify_channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+ netdev_warn(dev, "Failed to close loopify link!\n");
+ priv->loopify_channel = -1;
+ }
+ if (priv->channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->link) != 0)
+ netdev_warn(dev, "Failed to close link!\n");
+ priv->channel = -1;
+ }
+ priv->echannel = -1;
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+ return 0;
+}
+
+/* Determine the VA for a fragment. */
+static inline void *tile_net_frag_buf(skb_frag_t *f)
+{
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
+ return pfn_to_kaddr(pfn) + f->page_offset;
+}
+
+/* Acquire a completion entry and an egress slot, or if we can't,
+ * stop the queue and schedule the tx_wake timer.
+ */
+static s64 tile_net_equeue_try_reserve(struct net_device *dev,
+ struct tile_net_comps *comps,
+ gxio_mpipe_equeue_t *equeue,
+ int num_edescs)
+{
+ /* Try to acquire a completion entry. */
+ if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
+ tile_net_free_comps(equeue, comps, 32, false) != 0) {
+
+ /* Try to acquire an egress slot. */
+ s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+ if (slot >= 0)
+ return slot;
+
+ /* Freeing some completions gives the equeue time to drain. */
+ tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
+
+ slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+ if (slot >= 0)
+ return slot;
+ }
+
+ /* Still nothing; give up and stop the queue for a short while. */
+ netif_stop_subqueue(dev, smp_processor_id());
+ tile_net_schedule_tx_wake_timer(dev);
+ return -1;
+}
+
+/* Determine how many edesc's are needed for TSO.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments. This requires special care.
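+ *
+ * As an illustration (hypothetical sizes): with a gso_size of 1400 and a
+ * 2800-byte payload split across fragments of 1000 and 1800 bytes, the
+ * first segment needs one header edesc plus two payload edescs (1000 and
+ * 400 bytes), and the second needs one header edesc plus one payload
+ * edesc (1400 bytes), so this routine would return 5.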
+ */
+static int tso_count_edescs(struct sk_buff *skb)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int data_len = skb->data_len;
+ unsigned int p_len = sh->gso_size;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = -1; /* size of the current fragment */
+ long f_used = -1; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int num_edescs = 0;
+ int segment;
+
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+
+ unsigned int p_used = 0;
+
+ /* One edesc for header and for each piece of the payload. */
+ for (num_edescs++; p_used < p_len; num_edescs++) {
+
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = sh->frags[f_id].size;
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+ }
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ return num_edescs;
+}
+
+/* Prepare modified copies of the skbuff headers.
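+ *
+ * For each segment, the original IP/TCP header is copied into the
+ * preallocated header memory, and then the IP tot_len, id, and checksum
+ * and the TCP sequence number and checksum are patched, with FIN/PSH
+ * cleared on all but the final segment.
+ *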
+ * FIXME: add support for IPv6.
+ */
+static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
+ s64 slot)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ struct iphdr *ih;
+ struct tcphdr *th;
+ unsigned int data_len = skb->data_len;
+ unsigned char *data = skb->data;
+ unsigned int ih_off, th_off, sh_len, p_len;
+ unsigned int isum_seed, tsum_seed, id, seq;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = -1; /* size of the current fragment */
+ long f_used = -1; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int segment;
+
+ /* Locate original headers and compute various lengths. */
+ ih = ip_hdr(skb);
+ th = tcp_hdr(skb);
+ ih_off = skb_network_offset(skb);
+ th_off = skb_transport_offset(skb);
+ sh_len = th_off + tcp_hdrlen(skb);
+ p_len = sh->gso_size;
+
+ /* Set up seed values for IP and TCP csum and initialize id and seq. */
+ isum_seed = ((0xFFFF - ih->check) +
+ (0xFFFF - ih->tot_len) +
+ (0xFFFF - ih->id));
+ tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
+ id = ntohs(ih->id);
+ seq = ntohl(th->seq);
+
+ /* Prepare all the headers. */
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ unsigned char *buf;
+ unsigned int p_used = 0;
+
+ /* Copy to the header memory for this segment. */
+ buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+ NET_IP_ALIGN;
+ memcpy(buf, data, sh_len);
+
+ /* Update copied ip header. */
+ ih = (struct iphdr *)(buf + ih_off);
+ ih->tot_len = htons(sh_len + p_len - ih_off);
+ ih->id = htons(id);
+ ih->check = csum_long(isum_seed + ih->tot_len +
+ ih->id) ^ 0xffff;
+
+ /* Update copied tcp header. */
+ th = (struct tcphdr *)(buf + th_off);
+ th->seq = htonl(seq);
+ th->check = csum_long(tsum_seed + htons(sh_len + p_len));
+ if (segment != sh->gso_segs - 1) {
+ th->fin = 0;
+ th->psh = 0;
+ }
+
+ /* Skip past the header. */
+ slot++;
+
+ /* Skip past the payload. */
+ while (p_used < p_len) {
+
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = sh->frags[f_id].size;
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+
+ slot++;
+ }
+
+ id++;
+ seq += p_len;
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* Flush the headers so they are ready for hardware DMA. */
+ wmb();
+}
+
+/* Pass all the data to mpipe for egress. */
+static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
+ struct sk_buff *skb, unsigned char *headers, s64 slot)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int data_len = skb->data_len;
+ unsigned int p_len = sh->gso_size;
+ gxio_mpipe_edesc_t edesc_head = { { 0 } };
+ gxio_mpipe_edesc_t edesc_body = { { 0 } };
+ long f_id = -1; /* id of the current fragment */
+ long f_size = -1; /* size of the current fragment */
+ long f_used = -1; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ unsigned long tx_packets = 0, tx_bytes = 0;
+ unsigned int csum_start, sh_len;
+ int segment;
+
+ /* Prepare to egress the headers: set up header edesc. */
+ csum_start = skb_checksum_start_offset(skb);
+ sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ edesc_head.csum = 1;
+ edesc_head.csum_start = csum_start;
+ edesc_head.csum_dest = csum_start + skb->csum_offset;
+ edesc_head.xfer_size = sh_len;
+
+ /* This is only used to specify the TLB. */
+ edesc_head.stack_idx = large_buffer_stack;
+ edesc_body.stack_idx = large_buffer_stack;
+
+ /* Egress all the edescs. */
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ void *va;
+ unsigned char *buf;
+ unsigned int p_used = 0;
+
+ /* Egress the header. */
+ buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+ NET_IP_ALIGN;
+ edesc_head.va = va_to_tile_io_addr(buf);
+ gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
+ slot++;
+
+ /* Egress the payload. */
+ while (p_used < p_len) {
+
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = sh->frags[f_id].size;
+ f_used = 0;
+ }
+
+ va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+
+ /* Egress a piece of the payload. */
+ edesc_body.va = va_to_tile_io_addr(va);
+ edesc_body.xfer_size = n;
+ edesc_body.bound = !(p_used < p_len);
+ gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
+ slot++;
+ }
+
+ tx_packets++;
+ tx_bytes += sh_len + p_len;
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* Update stats. */
+ tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
+ tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+}
+
+/* Do "TSO" handling for egress.
+ *
+ * Normally drivers set NETIF_F_TSO only to support hardware TSO;
+ * otherwise the stack uses scatter-gather to implement GSO in software.
+ * In our testing, enabling GSO support (via NETIF_F_SG) drops network
+ * performance down to around 7.5 Gbps on the 10G interfaces, although
+ * also dropping cpu utilization way down, to under 8%. But
+ * implementing "TSO" in the driver brings performance back up to line
+ * rate, while dropping cpu usage even further, to less than 4%. In
+ * practice, profiling of GSO shows that skb_segment() is what causes
+ * the performance overheads; we benefit in the driver from using
+ * preallocated memory to duplicate the TCP/IP headers.
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int channel = priv->echannel;
+ struct tile_net_egress *egress = &egress_for_echannel[channel];
+ struct tile_net_comps *comps = info->comps_for_echannel[channel];
+ gxio_mpipe_equeue_t *equeue = egress->equeue;
+ unsigned long irqflags;
+ int num_edescs;
+ s64 slot;
+
+ /* Determine how many mpipe edesc's are needed. */
+ num_edescs = tso_count_edescs(skb);
+
+ local_irq_save(irqflags);
+
+ /* Try to acquire a completion entry and an egress slot. */
+ slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+ if (slot < 0) {
+ local_irq_restore(irqflags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Set up copies of header data properly. */
+ tso_headers_prepare(skb, egress->headers, slot);
+
+ /* Actually pass the data to the network hardware. */
+ tso_egress(dev, equeue, skb, egress->headers, slot);
+
+ /* Add a completion record. */
+ add_comp(equeue, comps, slot + num_edescs - 1, skb);
+
+ local_irq_restore(irqflags);
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer();
+
+ return NETDEV_TX_OK;
+}
+
+/* Analyze the body and frags for a transmit request. */
+static unsigned int tile_net_tx_frags(struct frag *frags,
+ struct sk_buff *skb,
+ void *b_data, unsigned int b_len)
+{
+ unsigned int i, n = 0;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ if (b_len != 0) {
+ frags[n].buf = b_data;
+ frags[n++].length = b_len;
+ }
+
+ for (i = 0; i < sh->nr_frags; i++) {
+ skb_frag_t *f = &sh->frags[i];
+ frags[n].buf = tile_net_frag_buf(f);
+ frags[n++].length = skb_frag_size(f);
+ }
+
+ return n;
+}
+
+/* Help the kernel transmit a packet. */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+ struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+ gxio_mpipe_equeue_t *equeue = egress->equeue;
+ struct tile_net_comps *comps =
+ info->comps_for_echannel[priv->echannel];
+ unsigned int len = skb->len;
+ unsigned char *data = skb->data;
+ unsigned int num_edescs;
+ struct frag frags[MAX_FRAGS];
+ gxio_mpipe_edesc_t edescs[MAX_FRAGS];
+ unsigned long irqflags;
+ gxio_mpipe_edesc_t edesc = { { 0 } };
+ unsigned int i;
+ s64 slot;
+
+ if (skb_is_gso(skb))
+ return tile_net_tx_tso(skb, dev);
+
+ num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+ /* This is only used to specify the TLB. */
+ edesc.stack_idx = large_buffer_stack;
+
+ /* Prepare the edescs. */
+ for (i = 0; i < num_edescs; i++) {
+ edesc.xfer_size = frags[i].length;
+ edesc.va = va_to_tile_io_addr(frags[i].buf);
+ edescs[i] = edesc;
+ }
+
+ /* Mark the final edesc. */
+ edescs[num_edescs - 1].bound = 1;
+
+ /* Add checksum info to the initial edesc, if needed. */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ unsigned int csum_start = skb_checksum_start_offset(skb);
+ edescs[0].csum = 1;
+ edescs[0].csum_start = csum_start;
+ edescs[0].csum_dest = csum_start + skb->csum_offset;
+ }
+
+ local_irq_save(irqflags);
+
+ /* Try to acquire a completion entry and an egress slot. */
+ slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+ if (slot < 0) {
+ local_irq_restore(irqflags);
+ return NETDEV_TX_BUSY;
+ }
+
+ for (i = 0; i < num_edescs; i++)
+ gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+
+ /* Add a completion record. */
+ add_comp(equeue, comps, slot - 1, skb);
+
+ /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
+ tile_net_stats_add(1, &priv->stats.tx_packets);
+ tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
+ &priv->stats.tx_bytes);
+
+ local_irq_restore(irqflags);
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer();
+
+ return NETDEV_TX_OK;
+}
+
+/* Return subqueue id on this core (one per core). */
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ return smp_processor_id();
+}
+
+/* Deal with a transmit timeout. */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ netif_wake_subqueue(dev, cpu);
+}
+
+/* Ioctl commands. */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+/* Get system network statistics for device. */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ return &priv->stats;
+}
+
+/* Change the MTU. */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/* Change the Ethernet address of the NIC.
+ *
+ * The hypervisor driver does not support changing the MAC address. However,
+ * the hardware does not do anything with the MAC address, so the address
+ * which gets used on outgoing packets, and which is accepted on incoming
+ * packets, is completely up to us.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+ disable_percpu_irq(ingress_irq);
+ tile_net_handle_ingress_irq(ingress_irq, NULL);
+ enable_percpu_irq(ingress_irq, 0);
+}
+#endif
+
+static const struct net_device_ops tile_net_ops = {
+ .ndo_open = tile_net_open,
+ .ndo_stop = tile_net_stop,
+ .ndo_start_xmit = tile_net_tx,
+ .ndo_select_queue = tile_net_select_queue,
+ .ndo_do_ioctl = tile_net_ioctl,
+ .ndo_get_stats = tile_net_get_stats,
+ .ndo_change_mtu = tile_net_change_mtu,
+ .ndo_tx_timeout = tile_net_tx_timeout,
+ .ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tile_net_netpoll,
+#endif
+};
+
+/* The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ dev->netdev_ops = &tile_net_ops;
+ dev->watchdog_timeo = TILE_NET_TIMEOUT;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG;
+ dev->features |= NETIF_F_TSO;
+ dev->mtu = 1500;
+}
+
+/* Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static void tile_net_dev_init(const char *name, const uint8_t *mac)
+{
+ int ret;
+ int i;
+ int nz_addr = 0;
+ struct net_device *dev;
+ struct tile_net_priv *priv;
+
+ /* HACK: Ignore "loop" links. */
+ if (strncmp(name, "loop", 4) == 0)
+ return;
+
+ /* Allocate the device structure. Normally, "name" is a
+ * template, instantiated by register_netdev(), but not for us.
+ */
+ dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
+ NR_CPUS, 1);
+ if (!dev) {
+ pr_err("alloc_netdev_mqs(%s) failed\n", name);
+ return;
+ }
+
+ /* Initialize "priv". */
+ priv = netdev_priv(dev);
+ memset(priv, 0, sizeof(*priv));
+ priv->dev = dev;
+ priv->channel = -1;
+ priv->loopify_channel = -1;
+ priv->echannel = -1;
+
+ /* Get the MAC address and set it in the device struct; this must
+ * be done before the device is opened. If the MAC is all zeroes,
+ * we use a random address, since we're probably on the simulator.
+ */
+ for (i = 0; i < 6; i++)
+ nz_addr |= mac[i];
+
+ if (nz_addr) {
+ memcpy(dev->dev_addr, mac, 6);
+ dev->addr_len = 6;
+ } else {
+ random_ether_addr(dev->dev_addr);
+ }
+
+ /* Register the network device. */
+ ret = register_netdev(dev);
+ if (ret) {
+ netdev_err(dev, "register_netdev failed %d\n", ret);
+ free_netdev(dev);
+ return;
+ }
+}
+
+/* Per-cpu module initialization. */
+static void tile_net_init_module_percpu(void *unused)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ int my_cpu = smp_processor_id();
+
+ info->has_iqueue = false;
+
+ info->my_cpu = my_cpu;
+
+ /* Initialize the egress timer. */
+ hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ info->egress_timer.function = tile_net_handle_egress_timer;
+}
+
+/* Module initialization. */
+static int __init tile_net_init_module(void)
+{
+ int i;
+ char name[GXIO_MPIPE_LINK_NAME_LEN];
+ uint8_t mac[6];
+
+ pr_info("Tilera Network Driver\n");
+
+ mutex_init(&tile_net_devs_for_channel_mutex);
+
+ /* Initialize each CPU. */
+ on_each_cpu(tile_net_init_module_percpu, NULL, 1);
+
+ /* Find out what devices we have, and initialize them. */
+ for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
+ tile_net_dev_init(name, mac);
+
+ if (!network_cpus_init())
+ network_cpus_map = *cpu_online_mask;
+
+ return 0;
+}
+
+module_init(tile_net_init_module);
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 6199f6b..c1ebfe9 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -114,7 +114,8 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
out_be32(card->regs + reg, value);
}
-/** spider_net_write_phy - write to phy register
+/**
+ * spider_net_write_phy - write to phy register
* @netdev: adapter to be written to
* @mii_id: id of MII
* @reg: PHY register
@@ -137,7 +138,8 @@ spider_net_write_phy(struct net_device *netdev, int mii_id,
spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
}
-/** spider_net_read_phy - read from phy register
+/**
+ * spider_net_read_phy - read from phy register
* @netdev: network device to be read from
* @mii_id: id of MII
* @reg: PHY register
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ea3e0a2..a46c198 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -486,7 +486,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
- velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
+ velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
opts->numrx = (opts->numrx & ~3);
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1eaf712..f8e3518 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -197,7 +197,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
#endif
/**
- * * temac_dma_bd_release - Release buffer descriptor rings
+ * temac_dma_bd_release - Release buffer descriptor rings
*/
static void temac_dma_bd_release(struct net_device *ndev)
{
@@ -768,7 +768,6 @@ static void ll_temac_recv(struct net_device *ndev)
DMA_FROM_DEVICE);
skb_put(skb, length);
- skb->dev = ndev;
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 4ad80f77..6695a1d 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -2962,7 +2962,7 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
- bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
+ bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
}
#endif
}
@@ -3030,7 +3030,7 @@ static void dfx_rcv_queue_process(
#ifdef DYNAMIC_BUFFERS
p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
#else
- p_buff = (char *) bp->p_rcv_buff_va[entry];
+ p_buff = bp->p_rcv_buff_va[entry];
#endif
memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 9ac4665..24d8566 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -1242,7 +1242,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 8)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from+2,6) ;
+ memcpy(to,from+2,6) ;
to += 8 ;
from += 8 ;
len -= 8 ;
@@ -1251,7 +1251,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 4)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from,4) ;
+ memcpy(to,from,4) ;
to += 4 ;
from += 4 ;
len -= 4 ;
@@ -1260,7 +1260,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 8)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from,8) ;
+ memcpy(to,from,8) ;
to += 8 ;
from += 8 ;
len -= 8 ;
@@ -1269,7 +1269,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 32)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from,32) ;
+ memcpy(to,from,32) ;
to += 32 ;
from += 32 ;
len -= 32 ;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index aed1a61..2c0894a 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -485,7 +485,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
return;
default:
- count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+ count = kiss_esc(p, ax->xbuff, len);
}
} else {
unsigned short crc;
@@ -497,7 +497,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
case CRC_MODE_SMACK:
*p |= 0x80;
crc = swab16(crc16(0, p, len));
- count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+ count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
break;
case CRC_MODE_FLEX_TEST:
ax->crcmode = CRC_MODE_NONE;
@@ -506,11 +506,11 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
case CRC_MODE_FLEX:
*p |= 0x20;
crc = calc_crc_flex(p, len);
- count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+ count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
break;
default:
- count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+ count = kiss_esc(p, ax->xbuff, len);
}
}
spin_unlock_bh(&ax->buflock);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4ffcd57..2857ab07 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -478,6 +478,7 @@ struct netvsc_device {
u32 nvsp_version;
atomic_t num_outstanding_sends;
+ wait_queue_head_t wait_drain;
bool start_remove;
bool destroy;
/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8b91947..6cee291 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
if (!net_device)
return NULL;
+ init_waitqueue_head(&net_device->wait_drain);
net_device->start_remove = false;
net_device->destroy = false;
net_device->dev = device;
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device)
spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/* Wait for all send completions */
- while (atomic_read(&net_device->num_outstanding_sends)) {
- dev_info(&device->device,
- "waiting for %d requests to complete...\n",
- atomic_read(&net_device->num_outstanding_sends));
- udelay(100);
- }
+ wait_event(net_device->wait_drain,
+ atomic_read(&net_device->num_outstanding_sends) == 0);
netvsc_disconnect_vsp(net_device);
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device,
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
+ if (net_device->destroy && num_outstanding_sends == 0)
+ wake_up(&net_device->wait_drain);
+
if (netif_queue_stopped(ndev) && !net_device->start_remove &&
(hv_ringbuf_avail_percent(&device->channel->outbound)
> RING_AVAIL_PERCENT_HIWATER ||
@@ -614,7 +614,7 @@ retry_send_cmplt:
static void netvsc_receive_completion(void *context)
{
struct hv_netvsc_packet *packet = context;
- struct hv_device *device = (struct hv_device *)packet->device;
+ struct hv_device *device = packet->device;
struct netvsc_device *net_device;
u64 transaction_id = 0;
bool fsend_receive_comp = false;
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index dcc80d6..8487204 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1017,7 +1017,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
{
int iobase;
- struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct ali_ircc_cb *self = priv;
struct net_device *dev;
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
@@ -1052,7 +1052,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
*/
static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
{
- struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct ali_ircc_cb *self = priv;
unsigned long flags;
int iobase;
int fcr; /* FIFO control reg */
@@ -1121,7 +1121,7 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
{
- struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct ali_ircc_cb *self = priv;
int iobase,dongle_id;
int tmp = 0;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index fc503aa..e09417d 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -794,7 +794,7 @@ static int __devinit au1k_irda_net_init(struct net_device *dev)
/* allocate the data buffers */
aup->db[0].vaddr =
- (void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
+ dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
if (!aup->db[0].vaddr)
goto out3;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 2ee56de..0737bd4 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -847,13 +847,12 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
const struct iovec *iv, unsigned long len,
int noblock)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
struct sk_buff *skb;
ssize_t ret = 0;
- add_wait_queue(sk_sleep(&q->sk), &wait);
while (len) {
- current->state = TASK_INTERRUPTIBLE;
+ prepare_to_wait(sk_sleep(&q->sk), &wait, TASK_INTERRUPTIBLE);
/* Read frames from the queue */
skb = skb_dequeue(&q->sk.sk_receive_queue);
@@ -875,8 +874,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
break;
}
- current->state = TASK_RUNNING;
- remove_wait_queue(sk_sleep(&q->sk), &wait);
+ finish_wait(sk_sleep(&q->sk), &wait);
return ret;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 944cdfb..3090dc6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -67,6 +67,11 @@ config BCM63XX_PHY
---help---
Currently supports the 6348 and 6358 PHYs.
+config BCM87XX_PHY
+ tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
+ help
+ Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs.
+
config ICPLUS_PHY
tristate "Drivers for ICPlus PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index f51af68..6d2dc6c 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index cfabd5f..a3fb5ce 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -77,13 +77,7 @@ static struct phy_driver am79c_driver = {
static int __init am79c_init(void)
{
- int ret;
-
- ret = phy_driver_register(&am79c_driver);
- if (ret)
- return ret;
-
- return 0;
+ return phy_driver_register(&am79c_driver);
}
static void __exit am79c_exit(void)
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index cd802eb..84c7a39 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -71,7 +71,8 @@ static int bcm63xx_config_intr(struct phy_device *phydev)
return err;
}
-static struct phy_driver bcm63xx_1_driver = {
+static struct phy_driver bcm63xx_driver[] = {
+{
.phy_id = 0x00406000,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (1)",
@@ -84,10 +85,8 @@ static struct phy_driver bcm63xx_1_driver = {
.ack_interrupt = bcm63xx_ack_interrupt,
.config_intr = bcm63xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-/* same phy as above, with just a different OUI */
-static struct phy_driver bcm63xx_2_driver = {
+}, {
+ /* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (2)",
@@ -99,30 +98,18 @@ static struct phy_driver bcm63xx_2_driver = {
.ack_interrupt = bcm63xx_ack_interrupt,
.config_intr = bcm63xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
+} };
static int __init bcm63xx_phy_init(void)
{
- int ret;
-
- ret = phy_driver_register(&bcm63xx_1_driver);
- if (ret)
- goto out_63xx_1;
- ret = phy_driver_register(&bcm63xx_2_driver);
- if (ret)
- goto out_63xx_2;
- return ret;
-
-out_63xx_2:
- phy_driver_unregister(&bcm63xx_1_driver);
-out_63xx_1:
- return ret;
+ return phy_drivers_register(bcm63xx_driver,
+ ARRAY_SIZE(bcm63xx_driver));
}
static void __exit bcm63xx_phy_exit(void)
{
- phy_driver_unregister(&bcm63xx_1_driver);
- phy_driver_unregister(&bcm63xx_2_driver);
+ phy_drivers_unregister(bcm63xx_driver,
+ ARRAY_SIZE(bcm63xx_driver));
}
module_init(bcm63xx_phy_init);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
new file mode 100644
index 0000000..2346b38
--- /dev/null
+++ b/drivers/net/phy/bcm87xx.c
@@ -0,0 +1,231 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 - 2012 Cavium, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#define PHY_ID_BCM8706 0x0143bdc1
+#define PHY_ID_BCM8727 0x0143bff0
+
+#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a)
+#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020)
+#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018)
+
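+/* LASI (Link Alarm Status Interrupt) control and status registers. */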
+#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002)
+#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005)
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+/* Set and/or override some configuration registers based on the
+ * broadcom,c45-reg-init property stored in the of_node for the phydev.
+ *
+ * broadcom,c45-reg-init = <devid reg mask value>,...;
+ *
+ * There may be one or more sets of <devid reg mask value>:
+ *
+ * devid: which sub-device to use.
+ * reg: the register.
+ * mask: if non-zero, ANDed with existing register value.
+ * value: ORed with the masked value and written to the register.
+ *
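+ * A hypothetical example, forcing bit 4 of register 0x8000 in device 1
+ * to be set (the mask clears the bit, the value then sets it, and the
+ * remaining bits are preserved):
+ *
+ * broadcom,c45-reg-init = <1 0x8000 0xffef 0x0010>;
+ *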
+ */
+static int bcm87xx_of_reg_init(struct phy_device *phydev)
+{
+ const __be32 *paddr;
+ const __be32 *paddr_end;
+ int len, ret;
+
+ if (!phydev->dev.of_node)
+ return 0;
+
+ paddr = of_get_property(phydev->dev.of_node,
+ "broadcom,c45-reg-init", &len);
+ if (!paddr)
+ return 0;
+
+ paddr_end = paddr + (len /= sizeof(*paddr));
+
+ ret = 0;
+
+ while (paddr + 3 < paddr_end) {
+ u16 devid = be32_to_cpup(paddr++);
+ u16 reg = be32_to_cpup(paddr++);
+ u16 mask = be32_to_cpup(paddr++);
+ u16 val_bits = be32_to_cpup(paddr++);
+ int val;
+ u32 regnum = MII_ADDR_C45 | (devid << 16) | reg;
+ val = 0;
+ if (mask) {
+ val = phy_read(phydev, regnum);
+ if (val < 0) {
+ ret = val;
+ goto err;
+ }
+ val &= mask;
+ }
+ val |= val_bits;
+
+ ret = phy_write(phydev, regnum, val);
+ if (ret < 0)
+ goto err;
+ }
+err:
+ return ret;
+}
+#else
+static int bcm87xx_of_reg_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int bcm87xx_config_init(struct phy_device *phydev)
+{
+ phydev->supported = SUPPORTED_10000baseR_FEC;
+ phydev->advertising = ADVERTISED_10000baseR_FEC;
+ phydev->state = PHY_NOLINK;
+ phydev->autoneg = AUTONEG_DISABLE;
+
+ bcm87xx_of_reg_init(phydev);
+
+ return 0;
+}
+
+static int bcm87xx_config_aneg(struct phy_device *phydev)
+{
+ return -EINVAL;
+}
+
+static int bcm87xx_read_status(struct phy_device *phydev)
+{
+ int rx_signal_detect;
+ int pcs_status;
+ int xgxs_lane_status;
+
+ rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT);
+ if (rx_signal_detect < 0)
+ return rx_signal_detect;
+
+ if ((rx_signal_detect & 1) == 0)
+ goto no_link;
+
+ pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS);
+ if (pcs_status < 0)
+ return pcs_status;
+
+ if ((pcs_status & 1) == 0)
+ goto no_link;
+
+ xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS);
+ if (xgxs_lane_status < 0)
+ return xgxs_lane_status;
+
+ if ((xgxs_lane_status & 0x1000) == 0)
+ goto no_link;
+
+ phydev->speed = 10000;
+ phydev->link = 1;
+ phydev->duplex = 1;
+ return 0;
+
+no_link:
+ phydev->link = 0;
+ return 0;
+}
+
+static int bcm87xx_config_intr(struct phy_device *phydev)
+{
+ int reg, err;
+
+ reg = phy_read(phydev, BCM87XX_LASI_CONTROL);
+
+ if (reg < 0)
+ return reg;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ reg |= 1;
+ else
+ reg &= ~1;
+
+ err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ return err;
+}
+
+static int bcm87xx_did_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read(phydev, BCM87XX_LASI_STATUS);
+
+ if (reg < 0) {
+ dev_err(&phydev->dev,
+ "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", reg);
+ return 0;
+ }
+ return (reg & 1) != 0;
+}
+
+static int bcm87xx_ack_interrupt(struct phy_device *phydev)
+{
+ /* Reading the LASI status clears it. */
+ bcm87xx_did_interrupt(phydev);
+ return 0;
+}
+
+static int bcm8706_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706;
+}
+
+static int bcm8727_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727;
+}
+
+static struct phy_driver bcm87xx_driver[] = {
+{
+ .phy_id = PHY_ID_BCM8706,
+ .phy_id_mask = 0xffffffff,
+ .name = "Broadcom BCM8706",
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = bcm87xx_config_init,
+ .config_aneg = bcm87xx_config_aneg,
+ .read_status = bcm87xx_read_status,
+ .ack_interrupt = bcm87xx_ack_interrupt,
+ .config_intr = bcm87xx_config_intr,
+ .did_interrupt = bcm87xx_did_interrupt,
+ .match_phy_device = bcm8706_match_phy_device,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM8727,
+ .phy_id_mask = 0xffffffff,
+ .name = "Broadcom BCM8727",
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = bcm87xx_config_init,
+ .config_aneg = bcm87xx_config_aneg,
+ .read_status = bcm87xx_read_status,
+ .ack_interrupt = bcm87xx_ack_interrupt,
+ .config_intr = bcm87xx_config_intr,
+ .did_interrupt = bcm87xx_did_interrupt,
+ .match_phy_device = bcm8727_match_phy_device,
+ .driver = { .owner = THIS_MODULE },
+} };
+
+static int __init bcm87xx_init(void)
+{
+ return phy_drivers_register(bcm87xx_driver,
+ ARRAY_SIZE(bcm87xx_driver));
+}
+module_init(bcm87xx_init);
+
+static void __exit bcm87xx_exit(void)
+{
+ phy_drivers_unregister(bcm87xx_driver,
+ ARRAY_SIZE(bcm87xx_driver));
+}
+module_exit(bcm87xx_exit);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 60338ff..f8c90ea 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -682,7 +682,8 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
return err;
}
-static struct phy_driver bcm5411_driver = {
+static struct phy_driver broadcom_drivers[] = {
+{
.phy_id = PHY_ID_BCM5411,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
@@ -695,9 +696,7 @@ static struct phy_driver bcm5411_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5421_driver = {
+}, {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
@@ -710,9 +709,7 @@ static struct phy_driver bcm5421_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5461_driver = {
+}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
@@ -725,9 +722,7 @@ static struct phy_driver bcm5461_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5464_driver = {
+}, {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
@@ -740,9 +735,7 @@ static struct phy_driver bcm5464_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5481_driver = {
+}, {
.phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
@@ -755,9 +748,7 @@ static struct phy_driver bcm5481_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5482_driver = {
+}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
@@ -770,9 +761,7 @@ static struct phy_driver bcm5482_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm50610_driver = {
+}, {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
@@ -785,9 +774,7 @@ static struct phy_driver bcm50610_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm50610m_driver = {
+}, {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
@@ -800,9 +787,7 @@ static struct phy_driver bcm50610m_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm57780_driver = {
+}, {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
@@ -815,9 +800,7 @@ static struct phy_driver bcm57780_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcmac131_driver = {
+}, {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
@@ -830,9 +813,7 @@ static struct phy_driver bcmac131_driver = {
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5241_driver = {
+}, {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
@@ -845,84 +826,18 @@ static struct phy_driver bcm5241_driver = {
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
.driver = { .owner = THIS_MODULE },
-};
+} };
static int __init broadcom_init(void)
{
- int ret;
-
- ret = phy_driver_register(&bcm5411_driver);
- if (ret)
- goto out_5411;
- ret = phy_driver_register(&bcm5421_driver);
- if (ret)
- goto out_5421;
- ret = phy_driver_register(&bcm5461_driver);
- if (ret)
- goto out_5461;
- ret = phy_driver_register(&bcm5464_driver);
- if (ret)
- goto out_5464;
- ret = phy_driver_register(&bcm5481_driver);
- if (ret)
- goto out_5481;
- ret = phy_driver_register(&bcm5482_driver);
- if (ret)
- goto out_5482;
- ret = phy_driver_register(&bcm50610_driver);
- if (ret)
- goto out_50610;
- ret = phy_driver_register(&bcm50610m_driver);
- if (ret)
- goto out_50610m;
- ret = phy_driver_register(&bcm57780_driver);
- if (ret)
- goto out_57780;
- ret = phy_driver_register(&bcmac131_driver);
- if (ret)
- goto out_ac131;
- ret = phy_driver_register(&bcm5241_driver);
- if (ret)
- goto out_5241;
- return ret;
-
-out_5241:
- phy_driver_unregister(&bcmac131_driver);
-out_ac131:
- phy_driver_unregister(&bcm57780_driver);
-out_57780:
- phy_driver_unregister(&bcm50610m_driver);
-out_50610m:
- phy_driver_unregister(&bcm50610_driver);
-out_50610:
- phy_driver_unregister(&bcm5482_driver);
-out_5482:
- phy_driver_unregister(&bcm5481_driver);
-out_5481:
- phy_driver_unregister(&bcm5464_driver);
-out_5464:
- phy_driver_unregister(&bcm5461_driver);
-out_5461:
- phy_driver_unregister(&bcm5421_driver);
-out_5421:
- phy_driver_unregister(&bcm5411_driver);
-out_5411:
- return ret;
+ return phy_drivers_register(broadcom_drivers,
+ ARRAY_SIZE(broadcom_drivers));
}
static void __exit broadcom_exit(void)
{
- phy_driver_unregister(&bcm5241_driver);
- phy_driver_unregister(&bcmac131_driver);
- phy_driver_unregister(&bcm57780_driver);
- phy_driver_unregister(&bcm50610m_driver);
- phy_driver_unregister(&bcm50610_driver);
- phy_driver_unregister(&bcm5482_driver);
- phy_driver_unregister(&bcm5481_driver);
- phy_driver_unregister(&bcm5464_driver);
- phy_driver_unregister(&bcm5461_driver);
- phy_driver_unregister(&bcm5421_driver);
- phy_driver_unregister(&bcm5411_driver);
+ phy_drivers_unregister(broadcom_drivers,
+ ARRAY_SIZE(broadcom_drivers));
}
module_init(broadcom_init);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index d281731..db472ff 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -102,7 +102,8 @@ static int cis820x_config_intr(struct phy_device *phydev)
}
/* Cicada 8201, a.k.a Vitesse VSC8201 */
-static struct phy_driver cis8201_driver = {
+static struct phy_driver cis820x_driver[] = {
+{
.phy_id = 0x000fc410,
.name = "Cicada Cis8201",
.phy_id_mask = 0x000ffff0,
@@ -113,11 +114,8 @@ static struct phy_driver cis8201_driver = {
.read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-/* Cicada 8204 */
-static struct phy_driver cis8204_driver = {
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x000fc440,
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
@@ -128,32 +126,19 @@ static struct phy_driver cis8204_driver = {
.read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
+ .driver = { .owner = THIS_MODULE,},
+} };
static int __init cicada_init(void)
{
- int ret;
-
- ret = phy_driver_register(&cis8204_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&cis8201_driver);
- if (ret)
- goto err2;
- return 0;
-
-err2:
- phy_driver_unregister(&cis8204_driver);
-err1:
- return ret;
+ return phy_drivers_register(cis820x_driver,
+ ARRAY_SIZE(cis820x_driver));
}
static void __exit cicada_exit(void)
{
- phy_driver_unregister(&cis8204_driver);
- phy_driver_unregister(&cis8201_driver);
+ phy_drivers_unregister(cis820x_driver,
+ ARRAY_SIZE(cis820x_driver));
}
module_init(cicada_init);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 5f59cc0..81c7bc0 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -144,7 +144,8 @@ static int dm9161_ack_interrupt(struct phy_device *phydev)
return (err < 0) ? err : 0;
}
-static struct phy_driver dm9161e_driver = {
+static struct phy_driver dm91xx_driver[] = {
+{
.phy_id = 0x0181b880,
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
@@ -153,9 +154,7 @@ static struct phy_driver dm9161e_driver = {
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver dm9161a_driver = {
+}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
@@ -164,9 +163,7 @@ static struct phy_driver dm9161a_driver = {
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver dm9131_driver = {
+}, {
.phy_id = 0x00181b80,
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
@@ -177,38 +174,18 @@ static struct phy_driver dm9131_driver = {
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
+} };
static int __init davicom_init(void)
{
- int ret;
-
- ret = phy_driver_register(&dm9161e_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&dm9161a_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register(&dm9131_driver);
- if (ret)
- goto err3;
- return 0;
-
- err3:
- phy_driver_unregister(&dm9161a_driver);
- err2:
- phy_driver_unregister(&dm9161e_driver);
- err1:
- return ret;
+ return phy_drivers_register(dm91xx_driver,
+ ARRAY_SIZE(dm91xx_driver));
}
static void __exit davicom_exit(void)
{
- phy_driver_unregister(&dm9161e_driver);
- phy_driver_unregister(&dm9161a_driver);
- phy_driver_unregister(&dm9131_driver);
+ phy_drivers_unregister(dm91xx_driver,
+ ARRAY_SIZE(dm91xx_driver));
}
module_init(davicom_init);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 940b290..b0da022 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -17,6 +17,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -453,16 +456,16 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
if (!phydev->attached_dev) {
- pr_warning("dp83640: expected to find an attached netdevice\n");
+ pr_warn("expected to find an attached netdevice\n");
return;
}
if (on) {
if (dev_mc_add(phydev->attached_dev, status_frame_dst))
- pr_warning("dp83640: failed to add mc address\n");
+ pr_warn("failed to add mc address\n");
} else {
if (dev_mc_del(phydev->attached_dev, status_frame_dst))
- pr_warning("dp83640: failed to delete mc address\n");
+ pr_warn("failed to delete mc address\n");
}
}
@@ -582,9 +585,9 @@ static void recalibrate(struct dp83640_clock *clock)
* read out and correct offsets
*/
val = ext_read(master, PAGE4, PTP_STS);
- pr_info("master PTP_STS 0x%04hx", val);
+ pr_info("master PTP_STS 0x%04hx\n", val);
val = ext_read(master, PAGE4, PTP_ESTS);
- pr_info("master PTP_ESTS 0x%04hx", val);
+ pr_info("master PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
@@ -594,9 +597,9 @@ static void recalibrate(struct dp83640_clock *clock)
list_for_each(this, &clock->phylist) {
tmp = list_entry(this, struct dp83640_private, list);
val = ext_read(tmp->phydev, PAGE4, PTP_STS);
- pr_info("slave PTP_STS 0x%04hx", val);
+ pr_info("slave PTP_STS 0x%04hx\n", val);
val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
- pr_info("slave PTP_ESTS 0x%04hx", val);
+ pr_info("slave PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
@@ -686,7 +689,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
prune_rx_ts(dp83640);
if (list_empty(&dp83640->rxpool)) {
- pr_debug("dp83640: rx timestamp pool is empty\n");
+ pr_debug("rx timestamp pool is empty\n");
goto out;
}
rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -709,7 +712,7 @@ static void decode_txts(struct dp83640_private *dp83640,
skb = skb_dequeue(&dp83640->tx_queue);
if (!skb) {
- pr_debug("dp83640: have timestamp but tx_queue empty\n");
+ pr_debug("have timestamp but tx_queue empty\n");
return;
}
ns = phy2txts(phy_txts);
@@ -847,7 +850,7 @@ static void dp83640_free_clocks(void)
list_for_each_safe(this, next, &phyter_clocks) {
clock = list_entry(this, struct dp83640_clock, list);
if (!list_empty(&clock->phylist)) {
- pr_warning("phy list non-empty while unloading");
+ pr_warn("phy list non-empty while unloading\n");
BUG();
}
list_del(&clock->list);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 633680d..ba55adf 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -70,7 +70,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
lpa |= LPA_10FULL;
break;
default:
- printk(KERN_WARNING "fixed phy: unknown speed\n");
+ pr_warn("fixed phy: unknown speed\n");
return -EINVAL;
}
} else {
@@ -90,7 +90,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
lpa |= LPA_10HALF;
break;
default:
- printk(KERN_WARNING "fixed phy: unknown speed\n");
+ pr_warn("fixed phy: unknown speed\n");
return -EINVAL;
}
}
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 5ac46f5..d5199cb 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL");
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
+#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
+#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
static int ip175c_config_init(struct phy_device *phydev)
{
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
+ /* INTR pin used: speed/link/duplex will cause an interrupt */
+ c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+ if (c < 0)
+ return c;
+
if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
/* Additional delay (2ns) used to adjust RX clock phase
* at RGMII interface */
@@ -195,7 +202,8 @@ static int ip101a_g_ack_interrupt(struct phy_device *phydev)
return 0;
}
-static struct phy_driver ip175c_driver = {
+static struct phy_driver icplus_driver[] = {
+{
.phy_id = 0x02430d80,
.name = "ICPlus IP175C",
.phy_id_mask = 0x0ffffff0,
@@ -206,9 +214,7 @@ static struct phy_driver ip175c_driver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ip1001_driver = {
+}, {
.phy_id = 0x02430d90,
.name = "ICPlus IP1001",
.phy_id_mask = 0x0ffffff0,
@@ -220,9 +226,7 @@ static struct phy_driver ip1001_driver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ip101a_g_driver = {
+}, {
.phy_id = 0x02430c54,
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
@@ -236,28 +240,18 @@ static struct phy_driver ip101a_g_driver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
-};
+} };
static int __init icplus_init(void)
{
- int ret = 0;
-
- ret = phy_driver_register(&ip1001_driver);
- if (ret < 0)
- return -ENODEV;
-
- ret = phy_driver_register(&ip101a_g_driver);
- if (ret < 0)
- return -ENODEV;
-
- return phy_driver_register(&ip175c_driver);
+ return phy_drivers_register(icplus_driver,
+ ARRAY_SIZE(icplus_driver));
}
static void __exit icplus_exit(void)
{
- phy_driver_unregister(&ip1001_driver);
- phy_driver_unregister(&ip101a_g_driver);
- phy_driver_unregister(&ip175c_driver);
+ phy_drivers_unregister(icplus_driver,
+ ARRAY_SIZE(icplus_driver));
}
module_init(icplus_init);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 6f6e8b6..6d1e3fc 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -149,7 +149,8 @@ static int lxt973_config_aneg(struct phy_device *phydev)
return phydev->priv ? 0 : genphy_config_aneg(phydev);
}
-static struct phy_driver lxt970_driver = {
+static struct phy_driver lxt97x_driver[] = {
+{
.phy_id = 0x78100000,
.name = "LXT970",
.phy_id_mask = 0xfffffff0,
@@ -160,10 +161,8 @@ static struct phy_driver lxt970_driver = {
.read_status = genphy_read_status,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver lxt971_driver = {
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x001378e0,
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
@@ -173,10 +172,8 @@ static struct phy_driver lxt971_driver = {
.read_status = genphy_read_status,
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver lxt973_driver = {
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x00137a10,
.name = "LXT973",
.phy_id_mask = 0xfffffff0,
@@ -185,39 +182,19 @@ static struct phy_driver lxt973_driver = {
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
.read_status = genphy_read_status,
- .driver = { .owner = THIS_MODULE,},
-};
+ .driver = { .owner = THIS_MODULE,},
+} };
static int __init lxt_init(void)
{
- int ret;
-
- ret = phy_driver_register(&lxt970_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&lxt971_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register(&lxt973_driver);
- if (ret)
- goto err3;
- return 0;
-
- err3:
- phy_driver_unregister(&lxt971_driver);
- err2:
- phy_driver_unregister(&lxt970_driver);
- err1:
- return ret;
+ return phy_drivers_register(lxt97x_driver,
+ ARRAY_SIZE(lxt97x_driver));
}
static void __exit lxt_exit(void)
{
- phy_driver_unregister(&lxt970_driver);
- phy_driver_unregister(&lxt971_driver);
- phy_driver_unregister(&lxt973_driver);
+ phy_drivers_unregister(lxt97x_driver,
+ ARRAY_SIZE(lxt97x_driver));
}
module_init(lxt_init);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 418928d..5d2a3f2 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -826,28 +826,14 @@ static struct phy_driver marvell_drivers[] = {
static int __init marvell_init(void)
{
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++) {
- ret = phy_driver_register(&marvell_drivers[i]);
-
- if (ret) {
- while (i-- > 0)
- phy_driver_unregister(&marvell_drivers[i]);
- return ret;
- }
- }
-
- return 0;
+ return phy_drivers_register(marvell_drivers,
+ ARRAY_SIZE(marvell_drivers));
}
static void __exit marvell_exit(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++)
- phy_driver_unregister(&marvell_drivers[i]);
+ phy_drivers_unregister(marvell_drivers,
+ ARRAY_SIZE(marvell_drivers));
}
module_init(marvell_init);
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 39ea067..5c12018 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
struct mdio_mux_parent_bus *pb = cb->parent;
int r;
- mutex_lock(&pb->mii_bus->mdio_lock);
+ /* In theory multiple mdio_mux could be stacked, thus creating
+ * more than a single level of nesting. But in practice,
+ * SINGLE_DEPTH_NESTING will cover the vast majority of use
+ * cases. We use it, instead of trying to handle the general
+ * case.
+ */
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
int r;
- mutex_lock(&pb->mii_bus->mdio_lock);
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
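The two hunks above annotate the parent-bus lock with mutex_lock_nested(): when an MDIO access goes through the mux, the child bus's mdio_lock is already held and the parent bus's lock (same lock class) is taken one level deeper, which a plain mutex_lock() would make lockdep report as a self-deadlock. A rough sketch of the annotation pattern with hypothetical parent/child locks, not code from this patch:

    /* two mutexes of the same class, always taken child -> parent */
    mutex_lock(&child_bus->mdio_lock);
    mutex_lock_nested(&parent_bus->mdio_lock, SINGLE_DEPTH_NESTING);
    /* ... perform the access on the parent bus ... */
    mutex_unlock(&parent_bus->mdio_lock);
    mutex_unlock(&child_bus->mdio_lock);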
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 683ef1c..170eb41 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -13,6 +13,9 @@
* option) any later version.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -22,6 +25,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/of_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -96,7 +100,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
}
/**
* of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
- * @mdio_np: Pointer to the mii_bus.
+ * @mdio_bus_np: Pointer to the device node of the mii_bus.
*
* Returns a pointer to the mii_bus, or NULL if none found.
*
@@ -148,7 +152,7 @@ int mdiobus_register(struct mii_bus *bus)
err = device_register(&bus->dev);
if (err) {
- printk(KERN_ERR "mii_bus %s failed to register\n", bus->id);
+ pr_err("mii_bus %s failed to register\n", bus->id);
return -EINVAL;
}
@@ -229,7 +233,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
struct phy_device *phydev;
int err;
- phydev = get_phy_device(bus, addr);
+ phydev = get_phy_device(bus, addr, false);
if (IS_ERR(phydev) || phydev == NULL)
return phydev;
@@ -305,6 +309,12 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
struct phy_device *phydev = to_phy_device(dev);
struct phy_driver *phydrv = to_phy_driver(drv);
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (phydrv->match_phy_device)
+ return phydrv->match_phy_device(phydev);
+
return ((phydrv->phy_id & phydrv->phy_id_mask) ==
(phydev->phy_id & phydrv->phy_id_mask));
}
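The pr_fmt definition added at the top of this file makes every subsequent pr_err()/pr_debug() call prepend the object's KBUILD_MODNAME, so the printk conversions in these hunks keep an identifiable prefix without repeating it in each format string. A small illustration (the exact prefix depends on what KBUILD_MODNAME expands to for this object, so the output shown is only indicative):

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/printk.h>

    static void example(void)
    {
            /* expands to printk with the module name prefixed, e.g.
             * "<modname>: mii_bus gpio-0 failed to register" */
            pr_err("mii_bus %s failed to register\n", "gpio-0");
    }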
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 590f902..cf287e0 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -114,7 +114,8 @@ static int ks8051_config_init(struct phy_device *phydev)
return 0;
}
-static struct phy_driver ks8737_driver = {
+static struct phy_driver ksphy_driver[] = {
+{
.phy_id = PHY_ID_KS8737,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KS8737",
@@ -126,9 +127,7 @@ static struct phy_driver ks8737_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ks8737_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ks8041_driver = {
+}, {
.phy_id = PHY_ID_KS8041,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KS8041",
@@ -141,9 +140,7 @@ static struct phy_driver ks8041_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ks8051_driver = {
+}, {
.phy_id = PHY_ID_KS8051,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KS8051",
@@ -156,12 +153,10 @@ static struct phy_driver ks8051_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ks8001_driver = {
+}, {
.phy_id = PHY_ID_KS8001,
.name = "Micrel KS8001 or KS8721",
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
@@ -170,11 +165,9 @@ static struct phy_driver ks8001_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ksz9021_driver = {
+}, {
.phy_id = PHY_ID_KSZ9021,
- .phy_id_mask = 0x000fff10,
+ .phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
@@ -185,51 +178,18 @@ static struct phy_driver ksz9021_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
.driver = { .owner = THIS_MODULE, },
-};
+} };
static int __init ksphy_init(void)
{
- int ret;
-
- ret = phy_driver_register(&ks8001_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&ksz9021_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register(&ks8737_driver);
- if (ret)
- goto err3;
- ret = phy_driver_register(&ks8041_driver);
- if (ret)
- goto err4;
- ret = phy_driver_register(&ks8051_driver);
- if (ret)
- goto err5;
-
- return 0;
-
-err5:
- phy_driver_unregister(&ks8041_driver);
-err4:
- phy_driver_unregister(&ks8737_driver);
-err3:
- phy_driver_unregister(&ksz9021_driver);
-err2:
- phy_driver_unregister(&ks8001_driver);
-err1:
- return ret;
+ return phy_drivers_register(ksphy_driver,
+ ARRAY_SIZE(ksphy_driver));
}
static void __exit ksphy_exit(void)
{
- phy_driver_unregister(&ks8001_driver);
- phy_driver_unregister(&ks8737_driver);
- phy_driver_unregister(&ksz9021_driver);
- phy_driver_unregister(&ks8041_driver);
- phy_driver_unregister(&ks8051_driver);
+ phy_drivers_unregister(ksphy_driver,
+ ARRAY_SIZE(ksphy_driver));
}
module_init(ksphy_init);
@@ -240,8 +200,8 @@ MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
- { PHY_ID_KSZ9021, 0x000fff10 },
- { PHY_ID_KS8001, 0x00fffff0 },
+ { PHY_ID_KSZ9021, 0x000ffffe },
+ { PHY_ID_KS8001, 0x00ffffff },
{ PHY_ID_KS8737, 0x00fffff0 },
{ PHY_ID_KS8041, 0x00fffff0 },
{ PHY_ID_KS8051, 0x00fffff0 },
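The phy_id_mask changes above take effect through the masked comparison used for driver matching and module autoloading; mdio_bus_match() earlier in this patch implements it essentially as the following standalone sketch:

    static bool phy_id_matches(u32 dev_id, u32 drv_id, u32 mask)
    {
            return (dev_id & mask) == (drv_id & mask);
    }

    /* With a mask ending in ...f0 the low revision nibble is ignored;
     * a wider mask such as 0x00ffffff also requires those bits to match. */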
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 04bb8fc..9a5f234 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -22,6 +24,8 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
+#define DEBUG
+
/* DP83865 phy identifier values */
#define DP83865_PHY_ID 0x20005c7a
@@ -112,8 +116,8 @@ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
ns_exp_write(phydev, 0x1c0,
ns_exp_read(phydev, 0x1c0) & 0xfffe);
- printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
- (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
+ pr_debug("10BASE-T HDX loopback %s\n",
+ (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
}
static int ns_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3cbda08..7ca2ff9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -15,6 +15,9 @@
* option) any later version.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -32,6 +35,7 @@
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/mdio.h>
#include <linux/atomic.h>
#include <asm/io.h>
@@ -44,18 +48,16 @@
*/
void phy_print_status(struct phy_device *phydev)
{
- pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
- phydev->link ? "Up" : "Down");
if (phydev->link)
- printk(KERN_CONT " - %d/%s", phydev->speed,
- DUPLEX_FULL == phydev->duplex ?
- "Full" : "Half");
-
- printk(KERN_CONT "\n");
+ pr_info("%s - Link is Up - %d/%s\n",
+ dev_name(&phydev->dev),
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
}
EXPORT_SYMBOL(phy_print_status);
-
/**
* phy_clear_interrupt - Ack the phy device's interrupt
* @phydev: the phy_device struct
@@ -482,9 +484,8 @@ static void phy_force_reduction(struct phy_device *phydev)
phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex;
- pr_info("Trying %d/%s\n", phydev->speed,
- DUPLEX_FULL == phydev->duplex ?
- "FULL" : "HALF");
+ pr_info("Trying %d/%s\n",
+ phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
}
@@ -598,9 +599,8 @@ int phy_start_interrupts(struct phy_device *phydev)
IRQF_SHARED,
"phy_interrupt",
phydev) < 0) {
- printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
- phydev->bus->name,
- phydev->irq);
+ pr_warn("%s: Can't get IRQ %d (PHY)\n",
+ phydev->bus->name, phydev->irq);
phydev->irq = PHY_POLL;
return 0;
}
@@ -838,10 +838,10 @@ void phy_state_machine(struct work_struct *work)
phydev->autoneg = AUTONEG_DISABLE;
- pr_info("Trying %d/%s\n", phydev->speed,
- DUPLEX_FULL ==
- phydev->duplex ?
- "FULL" : "HALF");
+ pr_info("Trying %d/%s\n",
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ?
+ "FULL" : "HALF");
}
break;
case PHY_NOLINK:
@@ -968,3 +968,283 @@ void phy_state_machine(struct work_struct *work)
schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
}
+
+static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
+ int addr)
+{
+ /* Write the desired MMD Devad */
+ bus->write(bus, addr, MII_MMD_CTRL, devad);
+
+ /* Write the desired MMD register address */
+ bus->write(bus, addr, MII_MMD_DATA, prtad);
+
+ /* Select the Function : DATA with no post increment */
+ bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+}
+
+/**
+ * phy_read_mmd_indirect - reads data from the MMD registers
+ * @bus: the target MII bus
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ *
+ * Description: reads data from the MMD registers of the specified phy
+ * address, using the clause-22 indirect access to clause-45 registers.
+ * The read sequence is:
+ * 1) Write reg 13 // DEVAD
+ * 2) Write reg 14 // MMD Address
+ * 3) Write reg 13 // MMD Data Command for MMD DEVAD
+ * 4) Read reg 14 // Read MMD data
+ */
+static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
+ int addr)
+{
+ u32 ret;
+
+ mmd_phy_indirect(bus, prtad, devad, addr);
+
+ /* Read the content of the MMD's selected register */
+ ret = bus->read(bus, addr, MII_MMD_DATA);
+
+ return ret;
+}
+
+/**
+ * phy_write_mmd_indirect - writes data to the MMD registers
+ * @bus: the target MII bus
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ * @data: data to write in the MMD register
+ *
+ * Description: writes data to the MMD registers of the specified
+ * phy address.
+ * The write sequence is:
+ * 1) Write reg 13 // DEVAD
+ * 2) Write reg 14 // MMD Address
+ * 3) Write reg 13 // MMD Data Command for MMD DEVAD
+ * 4) Write reg 14 // Write MMD data
+ */
+static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
+ int addr, u32 data)
+{
+ mmd_phy_indirect(bus, prtad, devad, addr);
+
+ /* Write the data into MMD's selected register */
+ bus->write(bus, addr, MII_MMD_DATA, data);
+}
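These two helpers implement the clause-22 indirect access to clause-45 (MMD) registers through registers 13 and 14 (MII_MMD_CTRL / MII_MMD_DATA). The EEE code below uses them as in the following snippet, which mirrors phy_init_eee() further down:

    /* read the EEE capability register (MMD 3, MDIO_PCS_EEE_ABLE) */
    int eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
                                        MDIO_MMD_PCS, phydev->addr);
    if (eee_cap < 0)
            return eee_cap;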
+
+static u32 phy_eee_to_adv(u16 eee_adv)
+{
+ u32 adv = 0;
+
+ if (eee_adv & MDIO_EEE_100TX)
+ adv |= ADVERTISED_100baseT_Full;
+ if (eee_adv & MDIO_EEE_1000T)
+ adv |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & MDIO_EEE_10GT)
+ adv |= ADVERTISED_10000baseT_Full;
+ if (eee_adv & MDIO_EEE_1000KX)
+ adv |= ADVERTISED_1000baseKX_Full;
+ if (eee_adv & MDIO_EEE_10GKX4)
+ adv |= ADVERTISED_10000baseKX4_Full;
+ if (eee_adv & MDIO_EEE_10GKR)
+ adv |= ADVERTISED_10000baseKR_Full;
+
+ return adv;
+}
+
+static u32 phy_eee_to_supported(u16 eee_supported)
+{
+ u32 supported = 0;
+
+ if (eee_supported & MDIO_EEE_100TX)
+ supported |= SUPPORTED_100baseT_Full;
+ if (eee_supported & MDIO_EEE_1000T)
+ supported |= SUPPORTED_1000baseT_Full;
+ if (eee_supported & MDIO_EEE_10GT)
+ supported |= SUPPORTED_10000baseT_Full;
+ if (eee_supported & MDIO_EEE_1000KX)
+ supported |= SUPPORTED_1000baseKX_Full;
+ if (eee_supported & MDIO_EEE_10GKX4)
+ supported |= SUPPORTED_10000baseKX4_Full;
+ if (eee_supported & MDIO_EEE_10GKR)
+ supported |= SUPPORTED_10000baseKR_Full;
+
+ return supported;
+}
+
+static u16 phy_adv_to_eee(u32 adv)
+{
+ u16 reg = 0;
+
+ if (adv & ADVERTISED_100baseT_Full)
+ reg |= MDIO_EEE_100TX;
+ if (adv & ADVERTISED_1000baseT_Full)
+ reg |= MDIO_EEE_1000T;
+ if (adv & ADVERTISED_10000baseT_Full)
+ reg |= MDIO_EEE_10GT;
+ if (adv & ADVERTISED_1000baseKX_Full)
+ reg |= MDIO_EEE_1000KX;
+ if (adv & ADVERTISED_10000baseKX4_Full)
+ reg |= MDIO_EEE_10GKX4;
+ if (adv & ADVERTISED_10000baseKR_Full)
+ reg |= MDIO_EEE_10GKR;
+
+ return reg;
+}
+
+/**
+ * phy_init_eee - init and check the EEE feature
+ * @phydev: target phy_device struct
+ * @clk_stop_enable: PHY may stop the clock during LPI
+ *
+ * Description: it checks if the Energy-Efficient Ethernet (EEE)
+ * is supported by looking at the MMD registers 3.20 and 7.60/61
+ * and it programs the MMD register 3.0 setting the "Clock stop enable"
+ * bit if required.
+ */
+int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+{
+ int ret = -EPROTONOSUPPORT;
+
+ /* According to 802.3az, EEE is supported only in full-duplex mode.
+ * The EEE feature is also active only when the core is operating with
+ * MII, GMII or RGMII.
+ */
+ if ((phydev->duplex == DUPLEX_FULL) &&
+ ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
+ int eee_lp, eee_cap, eee_adv;
+ u32 lp, cap, adv;
+ int idx, status;
+
+ /* Read phy status to properly get the right settings */
+ status = phy_read_status(phydev);
+ if (status)
+ return status;
+
+ /* First check if the EEE ability is supported */
+ eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+ MDIO_MMD_PCS, phydev->addr);
+ if (eee_cap < 0)
+ return eee_cap;
+
+ cap = phy_eee_to_supported(eee_cap);
+ if (!cap)
+ goto eee_exit;
+
+ /* Check which link settings were negotiated and verify them
+ * against the EEE advertising registers.
+ */
+ eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+ MDIO_MMD_AN, phydev->addr);
+ if (eee_lp < 0)
+ return eee_lp;
+
+ eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+ MDIO_MMD_AN, phydev->addr);
+ if (eee_adv < 0)
+ return eee_adv;
+
+ adv = phy_eee_to_adv(eee_adv);
+ lp = phy_eee_to_adv(eee_lp);
+ idx = phy_find_setting(phydev->speed, phydev->duplex);
+ if ((lp & adv & settings[idx].setting))
+ goto eee_exit;
+
+ if (clk_stop_enable) {
+ /* Configure the PHY to stop receiving xMII
+ * clock while it is signaling LPI.
+ */
+ int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
+ MDIO_MMD_PCS,
+ phydev->addr);
+ if (val < 0)
+ return val;
+
+ val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
+ phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
+ MDIO_MMD_PCS, phydev->addr, val);
+ }
+
+ ret = 0; /* EEE supported */
+ }
+
+eee_exit:
+ return ret;
+}
+EXPORT_SYMBOL(phy_init_eee);
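A hedged sketch of how a MAC driver might consume phy_init_eee(), for instance from its link-change handler once the negotiated speed and duplex are known; priv->eee_enabled is a hypothetical field, not something defined by this patch:

    if (phydev->link) {
            /* second argument true: allow the PHY to stop the xMII
             * clock while it signals LPI */
            priv->eee_enabled = (phy_init_eee(phydev, true) == 0);
    }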
+
+/**
+ * phy_get_eee_err - report the EEE wake error count
+ * @phydev: target phy_device struct
+ *
+ * Description: reports the number of times the PHY failed to
+ * complete its normal wake sequence.
+ */
+int phy_get_eee_err(struct phy_device *phydev)
+{
+ return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
+ MDIO_MMD_PCS, phydev->addr);
+
+}
+EXPORT_SYMBOL(phy_get_eee_err);
+
+/**
+ * phy_ethtool_get_eee - get EEE supported and status
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: reports the Supported, Advertised and LP Advertised
+ * EEE capabilities.
+ */
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
+{
+ int val;
+
+ /* Get Supported EEE */
+ val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+ MDIO_MMD_PCS, phydev->addr);
+ if (val < 0)
+ return val;
+ data->supported = phy_eee_to_supported(val);
+
+ /* Get advertisement EEE */
+ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+ MDIO_MMD_AN, phydev->addr);
+ if (val < 0)
+ return val;
+ data->advertised = phy_eee_to_adv(val);
+
+ /* Get LP advertisement EEE */
+ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+ MDIO_MMD_AN, phydev->addr);
+ if (val < 0)
+ return val;
+ data->lp_advertised = phy_eee_to_adv(val);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_get_eee);
+
+/**
+ * phy_ethtool_set_eee - set EEE supported and status
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: programs the EEE advertisement register.
+ */
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
+{
+ int val;
+
+ val = phy_adv_to_eee(data->advertised);
+ phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
+ phydev->addr, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_set_eee);
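A driver that keeps a phy_device pointer in its private data could back its ethtool EEE hooks directly with these helpers. A sketch under that assumption (foo_priv and its phydev member are hypothetical):

    static int foo_get_eee(struct net_device *dev, struct ethtool_eee *edata)
    {
            struct foo_priv *priv = netdev_priv(dev);

            return phy_ethtool_get_eee(priv->phydev, edata);
    }

    static int foo_set_eee(struct net_device *dev, struct ethtool_eee *edata)
    {
            struct foo_priv *priv = netdev_priv(dev);

            return phy_ethtool_set_eee(priv->phydev, edata);
    }

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_eee = foo_get_eee,
            .set_eee = foo_set_eee,
    };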
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index de86a55..8af46e8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -14,6 +14,9 @@
* option) any later version.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -149,8 +152,8 @@ int phy_scan_fixups(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_scan_fixups);
-static struct phy_device* phy_device_create(struct mii_bus *bus,
- int addr, int phy_id)
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+ bool is_c45, struct phy_c45_device_ids *c45_ids)
{
struct phy_device *dev;
@@ -171,8 +174,11 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
dev->autoneg = AUTONEG_ENABLE;
+ dev->is_c45 = is_c45;
dev->addr = addr;
dev->phy_id = phy_id;
+ if (c45_ids)
+ dev->c45_ids = *c45_ids;
dev->bus = bus;
dev->dev.parent = bus->parent;
dev->dev.bus = &mdio_bus_type;
@@ -197,20 +203,99 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
return dev;
}
+EXPORT_SYMBOL(phy_device_create);
+
+/**
+ * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs.
+ * @bus: the target MII bus
+ * @addr: PHY address on the MII bus
+ * @phy_id: where to store the ID retrieved.
+ * @c45_ids: where to store the c45 ID information.
+ *
+ * If the "devices in package" registers appear valid, they and the
+ * corresponding device identifiers are stored in @c45_ids and zero is
+ * stored in @phy_id. Otherwise 0xffffffff is stored in @phy_id.
+ * Returns zero on success.
+ *
+ */
+static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
+ struct phy_c45_device_ids *c45_ids) {
+ int phy_reg;
+ int i, reg_addr;
+ const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
+
+ /* Find first non-zero Devices In package. Device
+ * zero is reserved, so don't probe it.
+ */
+ for (i = 1;
+ i < num_ids && c45_ids->devices_in_package == 0;
+ i++) {
+ reg_addr = MII_ADDR_C45 | i << 16 | 6;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->devices_in_package = (phy_reg & 0xffff) << 16;
+
+ reg_addr = MII_ADDR_C45 | i << 16 | 5;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->devices_in_package |= (phy_reg & 0xffff);
+
+ /* If mostly Fs, there is no device there,
+ * let's get out of here.
+ */
+ if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) {
+ *phy_id = 0xffffffff;
+ return 0;
+ }
+ }
+
+ /* Now probe Device Identifiers for each device present. */
+ for (i = 1; i < num_ids; i++) {
+ if (!(c45_ids->devices_in_package & (1 << i)))
+ continue;
+
+ reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID1;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->device_ids[i] = (phy_reg & 0xffff) << 16;
+
+ reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->device_ids[i] |= (phy_reg & 0xffff);
+ }
+ *phy_id = 0;
+ return 0;
+}
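The loop above builds each clause-45 register address by OR-ing MII_ADDR_C45 with the device address in bits 20:16 and the register number in the low 16 bits. The same composition written out by hand, for reference (reads register 6, the upper "devices in package" word, of MMD 1):

    int devs_hi = mdiobus_read(bus, addr, MII_ADDR_C45 | (1 << 16) | 6);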
/**
* get_phy_id - reads the specified addr for its ID.
* @bus: the target MII bus
* @addr: PHY address on the MII bus
* @phy_id: where to store the ID retrieved.
+ * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
+ * @c45_ids: where to store the c45 ID information.
+ *
+ * Description: In the case of a 802.3-c22 PHY, reads the ID registers
+ * of the PHY at @addr on the @bus, stores it in @phy_id and returns
+ * zero on success.
+ *
+ * In the case of a 802.3-c45 PHY, get_phy_c45_ids() is invoked, and
+ * its return value is in turn returned.
*
- * Description: Reads the ID registers of the PHY at @addr on the
- * @bus, stores it in @phy_id and returns zero on success.
*/
-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
+static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
+ bool is_c45, struct phy_c45_device_ids *c45_ids)
{
int phy_reg;
+ if (is_c45)
+ return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
+
/* Grab the bits from PHYIR1, and put them
* in the upper half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
@@ -235,17 +320,19 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
* get_phy_device - reads the specified PHY device and returns its @phy_device struct
* @bus: the target MII bus
* @addr: PHY address on the MII bus
+ * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
*
* Description: Reads the ID registers of the PHY at @addr on the
* @bus, then allocates and returns the phy_device to represent it.
*/
-struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
+struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
+ struct phy_c45_device_ids c45_ids = {0};
struct phy_device *dev = NULL;
- u32 phy_id;
+ u32 phy_id = 0;
int r;
- r = get_phy_id(bus, addr, &phy_id);
+ r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
if (r)
return ERR_PTR(r);
@@ -253,7 +340,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
if ((phy_id & 0x1fffffff) == 0x1fffffff)
return NULL;
- dev = phy_device_create(bus, addr, phy_id);
+ dev = phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
return dev;
}
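With the new flag, explicitly probing a clause-45 PHY looks roughly like this (the bus and address are placeholders):

    struct phy_device *phydev;

    phydev = get_phy_device(bus, addr, true);   /* true: read the c45 IDs */
    if (IS_ERR(phydev) || phydev == NULL)
            return -ENODEV;

    return phy_device_register(phydev);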
@@ -446,6 +533,11 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Assume that if there is no driver, that it doesn't
* exist, and we should use the genphy driver. */
if (NULL == d->driver) {
+ if (phydev->is_c45) {
+ pr_err("No driver for phy %x\n", phydev->phy_id);
+ return -ENODEV;
+ }
+
d->driver = &genphy_driver.driver;
err = d->driver->probe(d);
@@ -975,8 +1067,8 @@ int phy_driver_register(struct phy_driver *new_driver)
retval = driver_register(&new_driver->driver);
if (retval) {
- printk(KERN_ERR "%s: Error %d in registering driver\n",
- new_driver->name, retval);
+ pr_err("%s: Error %d in registering driver\n",
+ new_driver->name, retval);
return retval;
}
@@ -987,12 +1079,37 @@ int phy_driver_register(struct phy_driver *new_driver)
}
EXPORT_SYMBOL(phy_driver_register);
+int phy_drivers_register(struct phy_driver *new_driver, int n)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n; i++) {
+ ret = phy_driver_register(new_driver + i);
+ if (ret) {
+ while (i-- > 0)
+ phy_driver_unregister(new_driver + i);
+ break;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL(phy_drivers_register);
+
void phy_driver_unregister(struct phy_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(phy_driver_unregister);
+void phy_drivers_unregister(struct phy_driver *drv, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ phy_driver_unregister(drv + i);
+ }
+}
+EXPORT_SYMBOL(phy_drivers_unregister);
+
static struct phy_driver genphy_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index f414ffb..72f9347 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -65,11 +65,7 @@ static struct phy_driver rtl821x_driver = {
static int __init realtek_init(void)
{
- int ret;
-
- ret = phy_driver_register(&rtl821x_driver);
-
- return ret;
+ return phy_driver_register(&rtl821x_driver);
}
static void __exit realtek_exit(void)
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index fc3e7e9..c6b06d3 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -61,7 +61,8 @@ static int lan911x_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt(phydev);
}
-static struct phy_driver lan83c185_driver = {
+static struct phy_driver smsc_phy_driver[] = {
+{
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN83C185",
@@ -83,9 +84,7 @@ static struct phy_driver lan83c185_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan8187_driver = {
+}, {
.phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8187",
@@ -107,9 +106,7 @@ static struct phy_driver lan8187_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan8700_driver = {
+}, {
.phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8700",
@@ -131,9 +128,7 @@ static struct phy_driver lan8700_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan911x_int_driver = {
+}, {
.phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN911x Internal PHY",
@@ -155,9 +150,7 @@ static struct phy_driver lan911x_int_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan8710_driver = {
+}, {
.phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8710/LAN8720",
@@ -179,53 +172,18 @@ static struct phy_driver lan8710_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
+} };
static int __init smsc_init(void)
{
- int ret;
-
- ret = phy_driver_register (&lan83c185_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register (&lan8187_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register (&lan8700_driver);
- if (ret)
- goto err3;
-
- ret = phy_driver_register (&lan911x_int_driver);
- if (ret)
- goto err4;
-
- ret = phy_driver_register (&lan8710_driver);
- if (ret)
- goto err5;
-
- return 0;
-
-err5:
- phy_driver_unregister (&lan911x_int_driver);
-err4:
- phy_driver_unregister (&lan8700_driver);
-err3:
- phy_driver_unregister (&lan8187_driver);
-err2:
- phy_driver_unregister (&lan83c185_driver);
-err1:
- return ret;
+ return phy_drivers_register(smsc_phy_driver,
+ ARRAY_SIZE(smsc_phy_driver));
}
static void __exit smsc_exit(void)
{
- phy_driver_unregister (&lan8710_driver);
- phy_driver_unregister (&lan911x_int_driver);
- phy_driver_unregister (&lan8700_driver);
- phy_driver_unregister (&lan8187_driver);
- phy_driver_unregister (&lan83c185_driver);
+ return phy_drivers_unregister(smsc_phy_driver,
+ ARRAY_SIZE(smsc_phy_driver));
}
MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 4eb98bc..1c3abce 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -11,6 +11,8 @@
* by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -356,7 +358,7 @@ static struct spi_driver ks8995_driver = {
static int __init ks8995_init(void)
{
- printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n");
+ pr_info(DRV_DESC " version " DRV_VERSION "\n");
return spi_register_driver(&ks8995_driver);
}
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 187a2fa..5e1eb13 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -81,7 +81,8 @@ static int ste10Xp_ack_interrupt(struct phy_device *phydev)
return 0;
}
-static struct phy_driver ste101p_pdriver = {
+static struct phy_driver ste10xp_pdriver[] = {
+{
.phy_id = STE101P_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "STe101p",
@@ -95,9 +96,7 @@ static struct phy_driver ste101p_pdriver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = {.owner = THIS_MODULE,}
-};
-
-static struct phy_driver ste100p_pdriver = {
+}, {
.phy_id = STE100P_PHY_ID,
.phy_id_mask = 0xffffffff,
.name = "STe100p",
@@ -111,22 +110,18 @@ static struct phy_driver ste100p_pdriver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = {.owner = THIS_MODULE,}
-};
+} };
static int __init ste10Xp_init(void)
{
- int retval;
-
- retval = phy_driver_register(&ste100p_pdriver);
- if (retval < 0)
- return retval;
- return phy_driver_register(&ste101p_pdriver);
+ return phy_drivers_register(ste10xp_pdriver,
+ ARRAY_SIZE(ste10xp_pdriver));
}
static void __exit ste10Xp_exit(void)
{
- phy_driver_unregister(&ste100p_pdriver);
- phy_driver_unregister(&ste101p_pdriver);
+ phy_drivers_unregister(ste10xp_pdriver,
+ ARRAY_SIZE(ste10xp_pdriver));
}
module_init(ste10Xp_init);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 0ec8e09..2585c38 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -138,21 +138,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
return err;
}
-/* Vitesse 824x */
-static struct phy_driver vsc8244_driver = {
- .phy_id = PHY_ID_VSC8244,
- .name = "Vitesse VSC8244",
- .phy_id_mask = 0x000fffc0,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_init = &vsc824x_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
- .ack_interrupt = &vsc824x_ack_interrupt,
- .config_intr = &vsc82xx_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
static int vsc8221_config_init(struct phy_device *phydev)
{
int err;
@@ -165,8 +150,22 @@ static int vsc8221_config_init(struct phy_device *phydev)
Options are 802.3Z SerDes or SGMII */
}
-/* Vitesse 8221 */
-static struct phy_driver vsc8221_driver = {
+/* Vitesse 824x */
+static struct phy_driver vsc82xx_driver[] = {
+{
+ .phy_id = PHY_ID_VSC8244,
+ .name = "Vitesse VSC8244",
+ .phy_id_mask = 0x000fffc0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc824x_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
+ /* Vitesse 8221 */
.phy_id = PHY_ID_VSC8221,
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8221",
@@ -177,26 +176,19 @@ static struct phy_driver vsc8221_driver = {
.read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
+ .driver = { .owner = THIS_MODULE,},
+} };
static int __init vsc82xx_init(void)
{
- int err;
-
- err = phy_driver_register(&vsc8244_driver);
- if (err < 0)
- return err;
- err = phy_driver_register(&vsc8221_driver);
- if (err < 0)
- phy_driver_unregister(&vsc8244_driver);
- return err;
+ return phy_drivers_register(vsc82xx_driver,
+ ARRAY_SIZE(vsc82xx_driver));
}
static void __exit vsc82xx_exit(void)
{
- phy_driver_unregister(&vsc8244_driver);
- phy_driver_unregister(&vsc8221_driver);
+ return phy_drivers_unregister(vsc82xx_driver,
+ ARRAY_SIZE(vsc82xx_driver));
}
module_init(vsc82xx_init);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index d4c9db3..a34d6bf 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -390,10 +390,10 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
if (sl->mode & SL_MODE_SLIP6)
- count = slip_esc6(p, (unsigned char *) sl->xbuff, len);
+ count = slip_esc6(p, sl->xbuff, len);
else
#endif
- count = slip_esc(p, (unsigned char *) sl->xbuff, len);
+ count = slip_esc(p, sl->xbuff, len);
/* Order of next two lines is *very* important.
* When we are sending a little amount of data,
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 89024d5..6a7260b 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -15,6 +15,17 @@ menuconfig NET_TEAM
if NET_TEAM
+config NET_TEAM_MODE_BROADCAST
+ tristate "Broadcast mode support"
+ depends on NET_TEAM
+ ---help---
+ Basic mode where packets are always transmitted by all suitable ports.
+
+ All added ports are setup to have team's mac address.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_broadcast.
+
config NET_TEAM_MODE_ROUNDROBIN
tristate "Round-robin mode support"
depends on NET_TEAM
@@ -22,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
Basic mode where port used for transmitting packets is selected in
round-robin fashion using packet counter.
- All added ports are setup to have bond's mac address.
+ All added ports are setup to have team's mac address.
To compile this team mode as a module, choose M here: the module
will be called team_mode_roundrobin.
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index fb9f4c1..9757630 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c61ae35..3620c63 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1,5 +1,5 @@
/*
- * net/drivers/team/team.c - Network team device driver
+ * drivers/net/team/team.c - Network team device driver
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -82,14 +82,16 @@ static void team_refresh_port_linkup(struct team_port *port)
port->state.linkup;
}
+
/*******************
* Options handling
*******************/
struct team_option_inst { /* One for each option instance */
struct list_head list;
+ struct list_head tmp_list;
struct team_option *option;
- struct team_port *port; /* != NULL if per-port */
+ struct team_option_inst_info info;
bool changed;
bool removed;
};
@@ -106,22 +108,6 @@ static struct team_option *__team_find_option(struct team *team,
return NULL;
}
-static int __team_option_inst_add(struct team *team, struct team_option *option,
- struct team_port *port)
-{
- struct team_option_inst *opt_inst;
-
- opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
- if (!opt_inst)
- return -ENOMEM;
- opt_inst->option = option;
- opt_inst->port = port;
- opt_inst->changed = true;
- opt_inst->removed = false;
- list_add_tail(&opt_inst->list, &team->option_inst_list);
- return 0;
-}
-
static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
list_del(&opt_inst->list);
@@ -139,14 +125,49 @@ static void __team_option_inst_del_option(struct team *team,
}
}
+static int __team_option_inst_add(struct team *team, struct team_option *option,
+ struct team_port *port)
+{
+ struct team_option_inst *opt_inst;
+ unsigned int array_size;
+ unsigned int i;
+ int err;
+
+ array_size = option->array_size;
+ if (!array_size)
+ array_size = 1; /* No array but still need one instance */
+
+ for (i = 0; i < array_size; i++) {
+ opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
+ if (!opt_inst)
+ return -ENOMEM;
+ opt_inst->option = option;
+ opt_inst->info.port = port;
+ opt_inst->info.array_index = i;
+ opt_inst->changed = true;
+ opt_inst->removed = false;
+ list_add_tail(&opt_inst->list, &team->option_inst_list);
+ if (option->init) {
+ err = option->init(team, &opt_inst->info);
+ if (err)
+ return err;
+ }
+
+ }
+ return 0;
+}
+
static int __team_option_inst_add_option(struct team *team,
struct team_option *option)
{
struct team_port *port;
int err;
- if (!option->per_port)
- return __team_option_inst_add(team, option, 0);
+ if (!option->per_port) {
+ err = __team_option_inst_add(team, option, NULL);
+ if (err)
+ goto inst_del_option;
+ }
list_for_each_entry(port, &team->port_list, list) {
err = __team_option_inst_add(team, option, port);
@@ -180,7 +201,7 @@ static void __team_option_inst_del_port(struct team *team,
list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
if (opt_inst->option->per_port &&
- opt_inst->port == port)
+ opt_inst->info.port == port)
__team_option_inst_del(opt_inst);
}
}
@@ -211,7 +232,7 @@ static void __team_option_inst_mark_removed_port(struct team *team,
struct team_option_inst *opt_inst;
list_for_each_entry(opt_inst, &team->option_inst_list, list) {
- if (opt_inst->port == port) {
+ if (opt_inst->info.port == port) {
opt_inst->changed = true;
opt_inst->removed = true;
}
@@ -324,28 +345,12 @@ void team_options_unregister(struct team *team,
}
EXPORT_SYMBOL(team_options_unregister);
-static int team_option_port_add(struct team *team, struct team_port *port)
-{
- int err;
-
- err = __team_option_inst_add_port(team, port);
- if (err)
- return err;
- __team_options_change_check(team);
- return 0;
-}
-
-static void team_option_port_del(struct team *team, struct team_port *port)
-{
- __team_option_inst_mark_removed_port(team, port);
- __team_options_change_check(team);
- __team_option_inst_del_port(team, port);
-}
-
static int team_option_get(struct team *team,
struct team_option_inst *opt_inst,
struct team_gsetter_ctx *ctx)
{
+ if (!opt_inst->option->getter)
+ return -EOPNOTSUPP;
return opt_inst->option->getter(team, ctx);
}
@@ -353,16 +358,26 @@ static int team_option_set(struct team *team,
struct team_option_inst *opt_inst,
struct team_gsetter_ctx *ctx)
{
- int err;
+ if (!opt_inst->option->setter)
+ return -EOPNOTSUPP;
+ return opt_inst->option->setter(team, ctx);
+}
- err = opt_inst->option->setter(team, ctx);
- if (err)
- return err;
+void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
+{
+ struct team_option_inst *opt_inst;
+ opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
opt_inst->changed = true;
+}
+EXPORT_SYMBOL(team_option_inst_set_change);
+
+void team_options_change_check(struct team *team)
+{
__team_options_change_check(team);
- return err;
}
+EXPORT_SYMBOL(team_options_change_check);
+
/****************
* Mode handling
@@ -371,13 +386,18 @@ static int team_option_set(struct team *team,
static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);
-static struct team_mode *__find_mode(const char *kind)
+struct team_mode_item {
+ struct list_head list;
+ const struct team_mode *mode;
+};
+
+static struct team_mode_item *__find_mode(const char *kind)
{
- struct team_mode *mode;
+ struct team_mode_item *mitem;
- list_for_each_entry(mode, &mode_list, list) {
- if (strcmp(mode->kind, kind) == 0)
- return mode;
+ list_for_each_entry(mitem, &mode_list, list) {
+ if (strcmp(mitem->mode->kind, kind) == 0)
+ return mitem;
}
return NULL;
}
@@ -392,49 +412,65 @@ static bool is_good_mode_name(const char *name)
return true;
}
-int team_mode_register(struct team_mode *mode)
+int team_mode_register(const struct team_mode *mode)
{
int err = 0;
+ struct team_mode_item *mitem;
if (!is_good_mode_name(mode->kind) ||
mode->priv_size > TEAM_MODE_PRIV_SIZE)
return -EINVAL;
+
+ mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
+ if (!mitem)
+ return -ENOMEM;
+
spin_lock(&mode_list_lock);
if (__find_mode(mode->kind)) {
err = -EEXIST;
+ kfree(mitem);
goto unlock;
}
- list_add_tail(&mode->list, &mode_list);
+ mitem->mode = mode;
+ list_add_tail(&mitem->list, &mode_list);
unlock:
spin_unlock(&mode_list_lock);
return err;
}
EXPORT_SYMBOL(team_mode_register);
-int team_mode_unregister(struct team_mode *mode)
+void team_mode_unregister(const struct team_mode *mode)
{
+ struct team_mode_item *mitem;
+
spin_lock(&mode_list_lock);
- list_del_init(&mode->list);
+ mitem = __find_mode(mode->kind);
+ if (mitem) {
+ list_del_init(&mitem->list);
+ kfree(mitem);
+ }
spin_unlock(&mode_list_lock);
- return 0;
}
EXPORT_SYMBOL(team_mode_unregister);
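Since team_mode is now registered by const pointer and tracked through an internal team_mode_item, a mode module (such as the broadcast mode added by this series) registers roughly as follows; bc_priv and bc_mode_ops are hypothetical stand-ins for the mode's private struct and its team_mode_ops:

    static const struct team_mode bc_mode = {
            .kind           = "broadcast",
            .owner          = THIS_MODULE,
            .priv_size      = sizeof(struct bc_priv),
            .ops            = &bc_mode_ops,
    };

    static int __init bc_init_module(void)
    {
            return team_mode_register(&bc_mode);
    }

    static void __exit bc_cleanup_module(void)
    {
            team_mode_unregister(&bc_mode);
    }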
-static struct team_mode *team_mode_get(const char *kind)
+static const struct team_mode *team_mode_get(const char *kind)
{
- struct team_mode *mode;
+ struct team_mode_item *mitem;
+ const struct team_mode *mode = NULL;
spin_lock(&mode_list_lock);
- mode = __find_mode(kind);
- if (!mode) {
+ mitem = __find_mode(kind);
+ if (!mitem) {
spin_unlock(&mode_list_lock);
request_module("team-mode-%s", kind);
spin_lock(&mode_list_lock);
- mode = __find_mode(kind);
+ mitem = __find_mode(kind);
}
- if (mode)
+ if (mitem) {
+ mode = mitem->mode;
if (!try_module_get(mode->owner))
mode = NULL;
+ }
spin_unlock(&mode_list_lock);
return mode;
@@ -458,26 +494,45 @@ rx_handler_result_t team_dummy_receive(struct team *team,
return RX_HANDLER_ANOTHER;
}
-static void team_adjust_ops(struct team *team)
+static const struct team_mode __team_no_mode = {
+ .kind = "*NOMODE*",
+};
+
+static bool team_is_mode_set(struct team *team)
+{
+ return team->mode != &__team_no_mode;
+}
+
+static void team_set_no_mode(struct team *team)
+{
+ team->mode = &__team_no_mode;
+}
+
+static void __team_adjust_ops(struct team *team, int en_port_count)
{
/*
* To avoid checks in rx/tx skb paths, ensure here that non-null and
* correct ops are always set.
*/
- if (list_empty(&team->port_list) ||
- !team->mode || !team->mode->ops->transmit)
+ if (!en_port_count || !team_is_mode_set(team) ||
+ !team->mode->ops->transmit)
team->ops.transmit = team_dummy_transmit;
else
team->ops.transmit = team->mode->ops->transmit;
- if (list_empty(&team->port_list) ||
- !team->mode || !team->mode->ops->receive)
+ if (!en_port_count || !team_is_mode_set(team) ||
+ !team->mode->ops->receive)
team->ops.receive = team_dummy_receive;
else
team->ops.receive = team->mode->ops->receive;
}
+static void team_adjust_ops(struct team *team)
+{
+ __team_adjust_ops(team, team->en_port_count);
+}
+
/*
* We can benefit from the fact that it's ensured no port is present
* at the time of mode change. Therefore no packets are in fly so there's no
@@ -487,7 +542,7 @@ static int __team_change_mode(struct team *team,
const struct team_mode *new_mode)
{
/* Check if mode was previously set and do cleanup if so */
- if (team->mode) {
+ if (team_is_mode_set(team)) {
void (*exit_op)(struct team *team) = team->ops.exit;
/* Clear ops area so no callback is called any longer */
@@ -497,7 +552,7 @@ static int __team_change_mode(struct team *team,
if (exit_op)
exit_op(team);
team_mode_put(team->mode);
- team->mode = NULL;
+ team_set_no_mode(team);
/* zero private data area */
memset(&team->mode_priv, 0,
sizeof(struct team) - offsetof(struct team, mode_priv));
@@ -523,7 +578,7 @@ static int __team_change_mode(struct team *team,
static int team_change_mode(struct team *team, const char *kind)
{
- struct team_mode *new_mode;
+ const struct team_mode *new_mode;
struct net_device *dev = team->dev;
int err;
@@ -532,7 +587,7 @@ static int team_change_mode(struct team *team, const char *kind)
return -EBUSY;
}
- if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+ if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
netdev_err(dev, "Unable to change to the same mode the team is in\n");
return -EINVAL;
}
@@ -559,8 +614,6 @@ static int team_change_mode(struct team *team, const char *kind)
* Rx path frame handler
************************/
-static bool team_port_enabled(struct team_port *port);
-
/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
@@ -618,11 +671,6 @@ static bool team_port_find(const struct team *team,
return false;
}
-static bool team_port_enabled(struct team_port *port)
-{
- return port->index != -1;
-}
-
/*
* Enable/disable port by adding to enabled port hashlist and setting
* port->index (Might be racy so reader could see incorrect ifindex when
@@ -637,6 +685,9 @@ static void team_port_enable(struct team *team,
port->index = team->en_port_count++;
hlist_add_head_rcu(&port->hlist,
team_port_index_hash(team, port->index));
+ team_adjust_ops(team);
+ if (team->ops.port_enabled)
+ team->ops.port_enabled(team, port);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -656,14 +707,20 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
static void team_port_disable(struct team *team,
struct team_port *port)
{
- int rm_index = port->index;
-
if (!team_port_enabled(port))
return;
+ if (team->ops.port_disabled)
+ team->ops.port_disabled(team, port);
hlist_del_rcu(&port->hlist);
- __reconstruct_port_hlist(team, rm_index);
- team->en_port_count--;
+ __reconstruct_port_hlist(team, port->index);
port->index = -1;
+ __team_adjust_ops(team, team->en_port_count - 1);
+ /*
+ * Wait until readers see adjusted ops. This ensures that
+ * readers never see team->en_port_count == 0
+ */
+ synchronize_rcu();
+ team->en_port_count--;
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -758,7 +815,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EBUSY;
}
- port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
+ port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
+ GFP_KERNEL);
if (!port)
return -ENOMEM;
@@ -809,7 +867,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_handler_register;
}
- err = team_option_port_add(team, port);
+ err = __team_option_inst_add_port(team, port);
if (err) {
netdev_err(dev, "Device %s failed to add per-port options\n",
portname);
@@ -819,9 +877,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
port->index = -1;
team_port_enable(team, port);
list_add_tail_rcu(&port->list, &team->port_list);
- team_adjust_ops(team);
__team_compute_features(team);
__team_port_change_check(port, !!netif_carrier_ok(port_dev));
+ __team_options_change_check(team);
netdev_info(dev, "Port device %s added\n", portname);
@@ -865,12 +923,13 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
return -ENOENT;
}
+ __team_option_inst_mark_removed_port(team, port);
+ __team_options_change_check(team);
+ __team_option_inst_del_port(team, port);
port->removed = true;
__team_port_change_check(port, false);
team_port_disable(team, port);
list_del_rcu(&port->list);
- team_adjust_ops(team);
- team_option_port_del(team, port);
netdev_rx_handler_unregister(port_dev);
netdev_set_master(port_dev, NULL);
vlan_vids_del_by_dev(port_dev, dev);
@@ -891,11 +950,9 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
* Net device ops
*****************/
-static const char team_no_mode_kind[] = "*NOMODE*";
-
static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
- ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind;
+ ctx->data.str_val = team->mode->kind;
return 0;
}
@@ -907,39 +964,47 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
- ctx->data.bool_val = team_port_enabled(ctx->port);
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.bool_val = team_port_enabled(port);
return 0;
}
static int team_port_en_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
+ struct team_port *port = ctx->info->port;
+
if (ctx->data.bool_val)
- team_port_enable(team, ctx->port);
+ team_port_enable(team, port);
else
- team_port_disable(team, ctx->port);
+ team_port_disable(team, port);
return 0;
}
static int team_user_linkup_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
- ctx->data.bool_val = ctx->port->user.linkup;
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.bool_val = port->user.linkup;
return 0;
}
static int team_user_linkup_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
- ctx->port->user.linkup = ctx->data.bool_val;
- team_refresh_port_linkup(ctx->port);
+ struct team_port *port = ctx->info->port;
+
+ port->user.linkup = ctx->data.bool_val;
+ team_refresh_port_linkup(port);
return 0;
}
static int team_user_linkup_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
- struct team_port *port = ctx->port;
+ struct team_port *port = ctx->info->port;
ctx->data.bool_val = port->user.linkup_enabled;
return 0;
@@ -948,10 +1013,10 @@ static int team_user_linkup_en_option_get(struct team *team,
static int team_user_linkup_en_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
- struct team_port *port = ctx->port;
+ struct team_port *port = ctx->info->port;
port->user.linkup_enabled = ctx->data.bool_val;
- team_refresh_port_linkup(ctx->port);
+ team_refresh_port_linkup(port);
return 0;
}
@@ -993,6 +1058,7 @@ static int team_init(struct net_device *dev)
team->dev = dev;
mutex_init(&team->lock);
+ team_set_no_mode(team);
team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
if (!team->pcpu_stats)
@@ -1116,10 +1182,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
- struct sockaddr *addr = p;
+ int err;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ err = eth_mac_addr(dev, p);
+ if (err)
+ return err;
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
if (team->ops.port_change_mac)
@@ -1321,7 +1388,7 @@ static void team_setup(struct net_device *dev)
* bring us to promisc mode in case a unicast addr is added.
* Let this up to underlay drivers.
*/
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO;
@@ -1404,7 +1471,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
void *hdr;
int err;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -1466,7 +1533,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
struct sk_buff *skb;
int err;
- skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -1482,16 +1549,128 @@ err_fill:
return err;
}
-static int team_nl_fill_options_get(struct sk_buff *skb,
- u32 pid, u32 seq, int flags,
- struct team *team, bool fillall)
+typedef int team_nl_send_func_t(struct sk_buff *skb,
+ struct team *team, u32 pid);
+
+static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
+{
+ return genlmsg_unicast(dev_net(team->dev), skb, pid);
+}
+
+static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
+ struct team_option_inst *opt_inst)
+{
+ struct nlattr *option_item;
+ struct team_option *option = opt_inst->option;
+ struct team_option_inst_info *opt_inst_info = &opt_inst->info;
+ struct team_gsetter_ctx ctx;
+ int err;
+
+ ctx.info = opt_inst_info;
+ err = team_option_get(team, opt_inst, &ctx);
+ if (err)
+ return err;
+
+ option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+ if (!option_item)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
+ goto nest_cancel;
+ if (opt_inst_info->port &&
+ nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
+ opt_inst_info->port->dev->ifindex))
+ goto nest_cancel;
+ if (opt_inst->option->array_size &&
+ nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
+ opt_inst_info->array_index))
+ goto nest_cancel;
+
+ switch (option->type) {
+ case TEAM_OPTION_TYPE_U32:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
+ goto nest_cancel;
+ if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
+ goto nest_cancel;
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
+ goto nest_cancel;
+ if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
+ ctx.data.str_val))
+ goto nest_cancel;
+ break;
+ case TEAM_OPTION_TYPE_BINARY:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
+ goto nest_cancel;
+ if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
+ ctx.data.bin_val.ptr))
+ goto nest_cancel;
+ break;
+ case TEAM_OPTION_TYPE_BOOL:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
+ goto nest_cancel;
+ if (ctx.data.bool_val &&
+ nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
+ goto nest_cancel;
+ break;
+ default:
+ BUG();
+ }
+ if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
+ goto nest_cancel;
+ if (opt_inst->changed) {
+ if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
+ goto nest_cancel;
+ opt_inst->changed = false;
+ }
+ nla_nest_end(skb, option_item);
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(skb, option_item);
+ return -EMSGSIZE;
+}
+
+static int __send_and_alloc_skb(struct sk_buff **pskb,
+ struct team *team, u32 pid,
+ team_nl_send_func_t *send_func)
+{
+ int err;
+
+ if (*pskb) {
+ err = send_func(*pskb, team, pid);
+ if (err)
+ return err;
+ }
+ *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!*pskb)
+ return -ENOMEM;
+ return 0;
+}
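__send_and_alloc_skb() is the "flush then refill" step for the multipart getters that follow: when a partially filled skb is passed in it is first handed to the caller-supplied send_func, and a fresh GENLMSG_DEFAULT_SIZE skb is then allocated for the next chunk. Condensed, the calling pattern in team_nl_send_options_get() below amounts to this sketch:

    struct sk_buff *skb = NULL;
    int err;

    err = __send_and_alloc_skb(&skb, team, pid, send_func); /* first call: alloc only */
    if (err)
            return err;
    /* fill skb until an option no longer fits (-EMSGSIZE), end the
     * message, then call __send_and_alloc_skb() again: it sends the
     * filled skb via send_func and hands back a fresh one */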
+
+static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
+ int flags, team_nl_send_func_t *send_func,
+ struct list_head *sel_opt_inst_list)
{
struct nlattr *option_list;
+ struct nlmsghdr *nlh;
void *hdr;
struct team_option_inst *opt_inst;
int err;
+ struct sk_buff *skb = NULL;
+ bool incomplete;
+ int i;
- hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ opt_inst = list_first_entry(sel_opt_inst_list,
+ struct team_option_inst, tmp_list);
+
+start_again:
+ err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
if (IS_ERR(hdr))
return PTR_ERR(hdr);
@@ -1500,122 +1679,80 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
goto nla_put_failure;
option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
if (!option_list)
- return -EMSGSIZE;
-
- list_for_each_entry(opt_inst, &team->option_inst_list, list) {
- struct nlattr *option_item;
- struct team_option *option = opt_inst->option;
- struct team_gsetter_ctx ctx;
+ goto nla_put_failure;
- /* Include only changed options if fill all mode is not on */
- if (!fillall && !opt_inst->changed)
- continue;
- option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
- if (!option_item)
- goto nla_put_failure;
- if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
- goto nla_put_failure;
- if (opt_inst->changed) {
- if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
- goto nla_put_failure;
- opt_inst->changed = false;
- }
- if (opt_inst->removed &&
- nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
- goto nla_put_failure;
- if (opt_inst->port &&
- nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
- opt_inst->port->dev->ifindex))
- goto nla_put_failure;
- ctx.port = opt_inst->port;
- switch (option->type) {
- case TEAM_OPTION_TYPE_U32:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
- ctx.data.u32_val))
- goto nla_put_failure;
- break;
- case TEAM_OPTION_TYPE_STRING:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
- ctx.data.str_val))
- goto nla_put_failure;
- break;
- case TEAM_OPTION_TYPE_BINARY:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
- ctx.data.bin_val.len, ctx.data.bin_val.ptr))
- goto nla_put_failure;
- break;
- case TEAM_OPTION_TYPE_BOOL:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (ctx.data.bool_val &&
- nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
- goto nla_put_failure;
- break;
- default:
- BUG();
+ i = 0;
+ incomplete = false;
+ list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
+ err = team_nl_fill_one_option_get(skb, team, opt_inst);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!i)
+ goto errout;
+ incomplete = true;
+ break;
+ }
+ goto errout;
}
- nla_nest_end(skb, option_item);
+ i++;
}
nla_nest_end(skb, option_list);
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ if (incomplete)
+ goto start_again;
+
+send_done:
+ nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ if (err)
+ goto errout;
+ goto send_done;
+ }
+
+ return send_func(skb, team, pid);
nla_put_failure:
err = -EMSGSIZE;
errout:
genlmsg_cancel(skb, hdr);
+ nlmsg_free(skb);
return err;
}
-static int team_nl_fill_options_get_all(struct sk_buff *skb,
- struct genl_info *info, int flags,
- struct team *team)
-{
- return team_nl_fill_options_get(skb, info->snd_pid,
- info->snd_seq, NLM_F_ACK,
- team, true);
-}
-
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
+ struct team_option_inst *opt_inst;
int err;
+ LIST_HEAD(sel_opt_inst_list);
team = team_nl_team_get(info);
if (!team)
return -EINVAL;
- err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);
+ list_for_each_entry(opt_inst, &team->option_inst_list, list)
+ list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
+ err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
+ NLM_F_ACK, team_nl_send_unicast,
+ &sel_opt_inst_list);
team_nl_team_put(team);
return err;
}
+static int team_nl_send_event_options_get(struct team *team,
+ struct list_head *sel_opt_inst_list);
+
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
int err = 0;
int i;
struct nlattr *nl_option;
+ LIST_HEAD(opt_inst_list);
team = team_nl_team_get(info);
if (!team)
@@ -1629,10 +1766,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
- struct nlattr *attr_port_ifindex;
+ struct nlattr *attr;
struct nlattr *attr_data;
enum team_option_type opt_type;
int opt_port_ifindex = 0; /* != 0 for per-port options */
+ u32 opt_array_index = 0;
+ bool opt_is_array = false;
struct team_option_inst *opt_inst;
char *opt_name;
bool opt_found = false;
@@ -1674,23 +1813,33 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
}
opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
- attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
- if (attr_port_ifindex)
- opt_port_ifindex = nla_get_u32(attr_port_ifindex);
+ attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
+ if (attr)
+ opt_port_ifindex = nla_get_u32(attr);
+
+ attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
+ if (attr) {
+ opt_is_array = true;
+ opt_array_index = nla_get_u32(attr);
+ }
list_for_each_entry(opt_inst, &team->option_inst_list, list) {
struct team_option *option = opt_inst->option;
struct team_gsetter_ctx ctx;
+ struct team_option_inst_info *opt_inst_info;
int tmp_ifindex;
- tmp_ifindex = opt_inst->port ?
- opt_inst->port->dev->ifindex : 0;
+ opt_inst_info = &opt_inst->info;
+ tmp_ifindex = opt_inst_info->port ?
+ opt_inst_info->port->dev->ifindex : 0;
if (option->type != opt_type ||
strcmp(option->name, opt_name) ||
- tmp_ifindex != opt_port_ifindex)
+ tmp_ifindex != opt_port_ifindex ||
+ (option->array_size && !opt_is_array) ||
+ opt_inst_info->array_index != opt_array_index)
continue;
opt_found = true;
- ctx.port = opt_inst->port;
+ ctx.info = opt_inst_info;
switch (opt_type) {
case TEAM_OPTION_TYPE_U32:
ctx.data.u32_val = nla_get_u32(attr_data);
@@ -1715,6 +1864,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
err = team_option_set(team, opt_inst, &ctx);
if (err)
goto team_put;
+ opt_inst->changed = true;
+ list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
err = -ENOENT;
@@ -1722,6 +1873,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
}
}
+ err = team_nl_send_event_options_get(team, &opt_inst_list);
+
team_put:
team_nl_team_put(team);
@@ -1746,7 +1899,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
goto nla_put_failure;
port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
if (!port_list)
- return -EMSGSIZE;
+ goto nla_put_failure;
list_for_each_entry(port, &team->port_list, list) {
struct nlattr *port_item;
@@ -1838,27 +1991,18 @@ static struct genl_multicast_group team_change_event_mcgrp = {
.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};
-static int team_nl_send_event_options_get(struct team *team)
+static int team_nl_send_multicast(struct sk_buff *skb,
+ struct team *team, u32 pid)
{
- struct sk_buff *skb;
- int err;
- struct net *net = dev_net(team->dev);
-
- skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
- if (err < 0)
- goto err_fill;
-
- err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
- GFP_KERNEL);
- return err;
+ return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
+ team_change_event_mcgrp.id, GFP_KERNEL);
+}
-err_fill:
- nlmsg_free(skb);
- return err;
+static int team_nl_send_event_options_get(struct team *team,
+ struct list_head *sel_opt_inst_list)
+{
+ return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
+ sel_opt_inst_list);
}
static int team_nl_send_event_port_list_get(struct team *team)
@@ -1867,7 +2011,7 @@ static int team_nl_send_event_port_list_get(struct team *team)
int err;
struct net *net = dev_net(team->dev);
- skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -1918,10 +2062,17 @@ static void team_nl_fini(void)
static void __team_options_change_check(struct team *team)
{
int err;
+ struct team_option_inst *opt_inst;
+ LIST_HEAD(sel_opt_inst_list);
- err = team_nl_send_event_options_get(team);
+ list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+ if (opt_inst->changed)
+ list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
+ }
+ err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
if (err)
- netdev_warn(team->dev, "Failed to send options change via netlink\n");
+ netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
+ err);
}
/* rtnl lock is held */
@@ -1965,6 +2116,7 @@ static void team_port_change_check(struct team_port *port, bool linkup)
mutex_unlock(&team->lock);
}
+
/************************************
* Net device notifier event handler
************************************/
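
The reworked team_nl_send_options_get() above fills one netlink message until an option no longer fits (-EMSGSIZE), flushes it via send_func(), restarts from the first unsent option, and finally appends an NLMSG_DONE message. The sketch below is only an illustration of that pagination idea in plain user-space C with hypothetical names (a text buffer stands in for the skb); it is not part of the patch.

/* Illustrative sketch only -- mirrors the "fill until full, flush,
 * restart, then send DONE" pattern used above. */
#include <stdio.h>
#include <string.h>

#define BUF_SZ 32

static void flush(const char *buf)
{
	printf("message: [%s]\n", buf);	/* stands in for send_func() */
}

static int send_items(const char *items[], int n)
{
	char buf[BUF_SZ];
	int i = 0, filled;

	while (i < n) {
		buf[0] = '\0';
		filled = 0;
		while (i < n) {
			if (strlen(buf) + strlen(items[i]) + 1 >= BUF_SZ) {
				if (!filled)
					return -1; /* single item too big, as "if (!i) goto errout" */
				break;	/* flush and start a new message */
			}
			strcat(buf, items[i]);
			strcat(buf, ";");
			filled++;
			i++;
		}
		flush(buf);
	}
	flush("DONE");	/* analogous to the trailing NLMSG_DONE message */
	return 0;
}

int main(void)
{
	const char *opts[] = { "option_one", "option_two", "option_three",
			       "option_four", "option_five" };

	return send_items(opts, 5) ? 1 : 0;
}
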
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index fd6bd03..253b8a5 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -1,5 +1,5 @@
/*
- * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team
+ * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -40,7 +40,7 @@ static bool ab_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *active_port;
- active_port = rcu_dereference(ab_priv(team)->active_port);
+ active_port = rcu_dereference_bh(ab_priv(team)->active_port);
if (unlikely(!active_port))
goto drop;
skb->dev = active_port->dev;
@@ -61,8 +61,12 @@ static void ab_port_leave(struct team *team, struct team_port *port)
static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
{
- if (ab_priv(team)->active_port)
- ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex;
+ struct team_port *active_port;
+
+ active_port = rcu_dereference_protected(ab_priv(team)->active_port,
+ lockdep_is_held(&team->lock));
+ if (active_port)
+ ctx->data.u32_val = active_port->dev->ifindex;
else
ctx->data.u32_val = 0;
return 0;
@@ -108,7 +112,7 @@ static const struct team_mode_ops ab_mode_ops = {
.port_leave = ab_port_leave,
};
-static struct team_mode ab_mode = {
+static const struct team_mode ab_mode = {
.kind = "activebackup",
.owner = THIS_MODULE,
.priv_size = sizeof(struct ab_priv),
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
new file mode 100644
index 0000000..5562345
--- /dev/null
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -0,0 +1,88 @@
+/*
+ * drivers/net/team/team_mode_broadcast.c - Broadcast mode for team
+ * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+static bool bc_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *cur;
+ struct team_port *last = NULL;
+ struct sk_buff *skb2;
+ bool ret;
+ bool sum_ret = false;
+
+ list_for_each_entry_rcu(cur, &team->port_list, list) {
+ if (team_port_txable(cur)) {
+ if (last) {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2) {
+ skb2->dev = last->dev;
+ ret = dev_queue_xmit(skb2);
+ if (!sum_ret)
+ sum_ret = ret;
+ }
+ }
+ last = cur;
+ }
+ }
+ if (last) {
+ skb->dev = last->dev;
+ ret = dev_queue_xmit(skb);
+ if (!sum_ret)
+ sum_ret = ret;
+ }
+ return sum_ret;
+}
+
+static int bc_port_enter(struct team *team, struct team_port *port)
+{
+ return team_port_set_team_mac(port);
+}
+
+static void bc_port_change_mac(struct team *team, struct team_port *port)
+{
+ team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops bc_mode_ops = {
+ .transmit = bc_transmit,
+ .port_enter = bc_port_enter,
+ .port_change_mac = bc_port_change_mac,
+};
+
+static const struct team_mode bc_mode = {
+ .kind = "broadcast",
+ .owner = THIS_MODULE,
+ .ops = &bc_mode_ops,
+};
+
+static int __init bc_init_module(void)
+{
+ return team_mode_register(&bc_mode);
+}
+
+static void __exit bc_cleanup_module(void)
+{
+ team_mode_unregister(&bc_mode);
+}
+
+module_init(bc_init_module);
+module_exit(bc_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Broadcast mode for team");
+MODULE_ALIAS("team-mode-broadcast");
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 86e8183..51a4b19 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -17,34 +17,210 @@
#include <linux/filter.h>
#include <linux/if_team.h>
+struct lb_priv;
+
+typedef struct team_port *lb_select_tx_port_func_t(struct team *,
+ struct lb_priv *,
+ struct sk_buff *,
+ unsigned char);
+
+#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */
+
+struct lb_stats {
+ u64 tx_bytes;
+};
+
+struct lb_pcpu_stats {
+ struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
+ struct u64_stats_sync syncp;
+};
+
+struct lb_stats_info {
+ struct lb_stats stats;
+ struct lb_stats last_stats;
+ struct team_option_inst_info *opt_inst_info;
+};
+
+struct lb_port_mapping {
+ struct team_port __rcu *port;
+ struct team_option_inst_info *opt_inst_info;
+};
+
+struct lb_priv_ex {
+ struct team *team;
+ struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
+ struct sock_fprog *orig_fprog;
+ struct {
+ unsigned int refresh_interval; /* in tenths of second */
+ struct delayed_work refresh_dw;
+ struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
+ } stats;
+};
+
struct lb_priv {
struct sk_filter __rcu *fp;
- struct sock_fprog *orig_fprog;
+ lb_select_tx_port_func_t __rcu *select_tx_port_func;
+ struct lb_pcpu_stats __percpu *pcpu_stats;
+ struct lb_priv_ex *ex; /* priv extension */
};
-static struct lb_priv *lb_priv(struct team *team)
+static struct lb_priv *get_lb_priv(struct team *team)
{
return (struct lb_priv *) &team->mode_priv;
}
-static bool lb_transmit(struct team *team, struct sk_buff *skb)
+struct lb_port_priv {
+ struct lb_stats __percpu *pcpu_stats;
+ struct lb_stats_info stats_info;
+};
+
+static struct lb_port_priv *get_lb_port_priv(struct team_port *port)
+{
+ return (struct lb_port_priv *) &port->mode_priv;
+}
+
+#define LB_HTPM_PORT_BY_HASH(lb_priv, hash) \
+	(lb_priv)->ex->tx_hash_to_port_mapping[hash].port
+
+#define LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) \
+	(lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info
+
+static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
+ struct team_port *port)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ bool changed = false;
+ int i;
+
+ for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
+ struct lb_port_mapping *pm;
+
+ pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
+ if (rcu_access_pointer(pm->port) == port) {
+ RCU_INIT_POINTER(pm->port, NULL);
+ team_option_inst_set_change(pm->opt_inst_info);
+ changed = true;
+ }
+ }
+ if (changed)
+ team_options_change_check(team);
+}
+
+/* Basic tx selection based solely by hash */
+static struct team_port *lb_hash_select_tx_port(struct team *team,
+ struct lb_priv *lb_priv,
+ struct sk_buff *skb,
+ unsigned char hash)
{
- struct sk_filter *fp;
- struct team_port *port;
- unsigned int hash;
int port_index;
- fp = rcu_dereference(lb_priv(team)->fp);
- if (unlikely(!fp))
- goto drop;
- hash = SK_RUN_FILTER(fp, skb);
port_index = hash % team->en_port_count;
- port = team_get_port_by_index_rcu(team, port_index);
+ return team_get_port_by_index_rcu(team, port_index);
+}
+
+/* Hash to port mapping select tx port */
+static struct team_port *lb_htpm_select_tx_port(struct team *team,
+ struct lb_priv *lb_priv,
+ struct sk_buff *skb,
+ unsigned char hash)
+{
+ return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
+}
+
+struct lb_select_tx_port {
+ char *name;
+ lb_select_tx_port_func_t *func;
+};
+
+static const struct lb_select_tx_port lb_select_tx_port_list[] = {
+ {
+ .name = "hash",
+ .func = lb_hash_select_tx_port,
+ },
+ {
+ .name = "hash_to_port_mapping",
+ .func = lb_htpm_select_tx_port,
+ },
+};
+#define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list)
+
+static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func)
+{
+ int i;
+
+ for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
+ const struct lb_select_tx_port *item;
+
+ item = &lb_select_tx_port_list[i];
+ if (item->func == func)
+ return item->name;
+ }
+ return NULL;
+}
+
+static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
+{
+ int i;
+
+ for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
+ const struct lb_select_tx_port *item;
+
+ item = &lb_select_tx_port_list[i];
+ if (!strcmp(item->name, name))
+ return item->func;
+ }
+ return NULL;
+}
+
+static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
+ struct sk_buff *skb)
+{
+ struct sk_filter *fp;
+ uint32_t lhash;
+ unsigned char *c;
+
+ fp = rcu_dereference_bh(lb_priv->fp);
+ if (unlikely(!fp))
+ return 0;
+ lhash = SK_RUN_FILTER(fp, skb);
+ c = (char *) &lhash;
+ return c[0] ^ c[1] ^ c[2] ^ c[3];
+}
+
+static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv,
+ struct lb_port_priv *lb_port_priv,
+ unsigned char hash)
+{
+ struct lb_pcpu_stats *pcpu_stats;
+ struct lb_stats *port_stats;
+ struct lb_stats *hash_stats;
+
+ pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats);
+ port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats);
+ hash_stats = &pcpu_stats->hash_stats[hash];
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ port_stats->tx_bytes += tx_bytes;
+ hash_stats->tx_bytes += tx_bytes;
+ u64_stats_update_end(&pcpu_stats->syncp);
+}
+
+static bool lb_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *select_tx_port_func;
+ struct team_port *port;
+ unsigned char hash;
+ unsigned int tx_bytes = skb->len;
+
+ hash = lb_get_skb_hash(lb_priv, skb);
+ select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);
+ port = select_tx_port_func(team, lb_priv, skb, hash);
if (unlikely(!port))
goto drop;
skb->dev = port->dev;
if (dev_queue_xmit(skb))
return false;
+ lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash);
return true;
drop:
@@ -54,14 +230,16 @@ drop:
static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
{
- if (!lb_priv(team)->orig_fprog) {
+ struct lb_priv *lb_priv = get_lb_priv(team);
+
+ if (!lb_priv->ex->orig_fprog) {
ctx->data.bin_val.len = 0;
ctx->data.bin_val.ptr = NULL;
return 0;
}
- ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len *
+ ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len *
sizeof(struct sock_filter);
- ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter;
+ ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter;
return 0;
}
@@ -94,7 +272,9 @@ static void __fprog_destroy(struct sock_fprog *fprog)
static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
{
+ struct lb_priv *lb_priv = get_lb_priv(team);
struct sk_filter *fp = NULL;
+ struct sk_filter *orig_fp;
struct sock_fprog *fprog = NULL;
int err;
@@ -110,14 +290,238 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
}
}
- if (lb_priv(team)->orig_fprog) {
+ if (lb_priv->ex->orig_fprog) {
/* Clear old filter data */
- __fprog_destroy(lb_priv(team)->orig_fprog);
- sk_unattached_filter_destroy(lb_priv(team)->fp);
+ __fprog_destroy(lb_priv->ex->orig_fprog);
+ orig_fp = rcu_dereference_protected(lb_priv->fp,
+ lockdep_is_held(&team->lock));
+ sk_unattached_filter_destroy(orig_fp);
}
- rcu_assign_pointer(lb_priv(team)->fp, fp);
- lb_priv(team)->orig_fprog = fprog;
+ rcu_assign_pointer(lb_priv->fp, fp);
+ lb_priv->ex->orig_fprog = fprog;
+ return 0;
+}
+
+static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *func;
+ char *name;
+
+ func = rcu_dereference_protected(lb_priv->select_tx_port_func,
+ lockdep_is_held(&team->lock));
+ name = lb_select_tx_port_get_name(func);
+ BUG_ON(!name);
+ ctx->data.str_val = name;
+ return 0;
+}
+
+static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *func;
+
+ func = lb_select_tx_port_get_func(ctx->data.str_val);
+ if (!func)
+ return -EINVAL;
+ rcu_assign_pointer(lb_priv->select_tx_port_func, func);
+ return 0;
+}
+
+static int lb_tx_hash_to_port_mapping_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned char hash = info->array_index;
+
+ LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info;
+ return 0;
+}
+
+static int lb_tx_hash_to_port_mapping_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ struct team_port *port;
+ unsigned char hash = ctx->info->array_index;
+
+ port = LB_HTPM_PORT_BY_HASH(lb_priv, hash);
+ ctx->data.u32_val = port ? port->dev->ifindex : 0;
+ return 0;
+}
+
+static int lb_tx_hash_to_port_mapping_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ struct team_port *port;
+ unsigned char hash = ctx->info->array_index;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ if (ctx->data.u32_val == port->dev->ifindex &&
+ team_port_enabled(port)) {
+ rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash),
+ port);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int lb_hash_stats_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned char hash = info->array_index;
+
+ lb_priv->ex->stats.info[hash].opt_inst_info = info;
+ return 0;
+}
+
+static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned char hash = ctx->info->array_index;
+
+ ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats;
+ ctx->data.bin_val.len = sizeof(struct lb_stats);
+ return 0;
+}
+
+static int lb_port_stats_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ struct team_port *port = info->port;
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ lb_port_priv->stats_info.opt_inst_info = info;
+ return 0;
+}
+
+static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats;
+ ctx->data.bin_val.len = sizeof(struct lb_stats);
+ return 0;
+}
+
+static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info)
+{
+ memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats));
+ memset(&s_info->stats, 0, sizeof(struct lb_stats));
+}
+
+static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info,
+ struct team *team)
+{
+ if (memcmp(&s_info->last_stats, &s_info->stats,
+ sizeof(struct lb_stats))) {
+ team_option_inst_set_change(s_info->opt_inst_info);
+ return true;
+ }
+ return false;
+}
+
+static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
+ struct lb_stats *cpu_stats,
+ struct u64_stats_sync *syncp)
+{
+ unsigned int start;
+ struct lb_stats tmp;
+
+ do {
+ start = u64_stats_fetch_begin_bh(syncp);
+ tmp.tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(syncp, start));
+ acc_stats->tx_bytes += tmp.tx_bytes;
+}
+
+static void lb_stats_refresh(struct work_struct *work)
+{
+ struct team *team;
+ struct lb_priv *lb_priv;
+ struct lb_priv_ex *lb_priv_ex;
+ struct lb_pcpu_stats *pcpu_stats;
+ struct lb_stats *stats;
+ struct lb_stats_info *s_info;
+ struct team_port *port;
+ bool changed = false;
+ int i;
+ int j;
+
+ lb_priv_ex = container_of(work, struct lb_priv_ex,
+ stats.refresh_dw.work);
+
+ team = lb_priv_ex->team;
+ lb_priv = get_lb_priv(team);
+
+ if (!mutex_trylock(&team->lock)) {
+ schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
+ return;
+ }
+
+ for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) {
+ s_info = &lb_priv->ex->stats.info[j];
+ __lb_stats_info_refresh_prepare(s_info);
+ for_each_possible_cpu(i) {
+ pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
+ stats = &pcpu_stats->hash_stats[j];
+ __lb_one_cpu_stats_add(&s_info->stats, stats,
+ &pcpu_stats->syncp);
+ }
+ changed |= __lb_stats_info_refresh_check(s_info, team);
+ }
+
+ list_for_each_entry(port, &team->port_list, list) {
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ s_info = &lb_port_priv->stats_info;
+ __lb_stats_info_refresh_prepare(s_info);
+ for_each_possible_cpu(i) {
+ pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
+ stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i);
+ __lb_one_cpu_stats_add(&s_info->stats, stats,
+ &pcpu_stats->syncp);
+ }
+ changed |= __lb_stats_info_refresh_check(s_info, team);
+ }
+
+ if (changed)
+ team_options_change_check(team);
+
+ schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
+ (lb_priv_ex->stats.refresh_interval * HZ) / 10);
+
+ mutex_unlock(&team->lock);
+}
+
+static int lb_stats_refresh_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+
+ ctx->data.u32_val = lb_priv->ex->stats.refresh_interval;
+ return 0;
+}
+
+static int lb_stats_refresh_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned int interval;
+
+ interval = ctx->data.u32_val;
+ if (lb_priv->ex->stats.refresh_interval == interval)
+ return 0;
+ lb_priv->ex->stats.refresh_interval = interval;
+ if (interval)
+ schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0);
+ else
+ cancel_delayed_work(&lb_priv->ex->stats.refresh_dw);
return 0;
}
@@ -128,30 +532,125 @@ static const struct team_option lb_options[] = {
.getter = lb_bpf_func_get,
.setter = lb_bpf_func_set,
},
+ {
+ .name = "lb_tx_method",
+ .type = TEAM_OPTION_TYPE_STRING,
+ .getter = lb_tx_method_get,
+ .setter = lb_tx_method_set,
+ },
+ {
+ .name = "lb_tx_hash_to_port_mapping",
+ .array_size = LB_TX_HASHTABLE_SIZE,
+ .type = TEAM_OPTION_TYPE_U32,
+ .init = lb_tx_hash_to_port_mapping_init,
+ .getter = lb_tx_hash_to_port_mapping_get,
+ .setter = lb_tx_hash_to_port_mapping_set,
+ },
+ {
+ .name = "lb_hash_stats",
+ .array_size = LB_TX_HASHTABLE_SIZE,
+ .type = TEAM_OPTION_TYPE_BINARY,
+ .init = lb_hash_stats_init,
+ .getter = lb_hash_stats_get,
+ },
+ {
+ .name = "lb_port_stats",
+ .per_port = true,
+ .type = TEAM_OPTION_TYPE_BINARY,
+ .init = lb_port_stats_init,
+ .getter = lb_port_stats_get,
+ },
+ {
+ .name = "lb_stats_refresh_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = lb_stats_refresh_interval_get,
+ .setter = lb_stats_refresh_interval_set,
+ },
};
static int lb_init(struct team *team)
{
- return team_options_register(team, lb_options,
- ARRAY_SIZE(lb_options));
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *func;
+ int err;
+
+ /* set default tx port selector */
+ func = lb_select_tx_port_get_func("hash");
+ BUG_ON(!func);
+ rcu_assign_pointer(lb_priv->select_tx_port_func, func);
+
+ lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL);
+ if (!lb_priv->ex)
+ return -ENOMEM;
+ lb_priv->ex->team = team;
+
+ lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats);
+ if (!lb_priv->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_pcpu_stats;
+ }
+
+ INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);
+
+ err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
+ if (err)
+ goto err_options_register;
+ return 0;
+
+err_options_register:
+ free_percpu(lb_priv->pcpu_stats);
+err_alloc_pcpu_stats:
+ kfree(lb_priv->ex);
+ return err;
}
static void lb_exit(struct team *team)
{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+
team_options_unregister(team, lb_options,
ARRAY_SIZE(lb_options));
+ cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
+ free_percpu(lb_priv->pcpu_stats);
+ kfree(lb_priv->ex);
+}
+
+static int lb_port_enter(struct team *team, struct team_port *port)
+{
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats);
+ if (!lb_port_priv->pcpu_stats)
+ return -ENOMEM;
+ return 0;
+}
+
+static void lb_port_leave(struct team *team, struct team_port *port)
+{
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ free_percpu(lb_port_priv->pcpu_stats);
+}
+
+static void lb_port_disabled(struct team *team, struct team_port *port)
+{
+ lb_tx_hash_to_port_mapping_null_port(team, port);
}
static const struct team_mode_ops lb_mode_ops = {
.init = lb_init,
.exit = lb_exit,
+ .port_enter = lb_port_enter,
+ .port_leave = lb_port_leave,
+ .port_disabled = lb_port_disabled,
.transmit = lb_transmit,
};
-static struct team_mode lb_mode = {
+static const struct team_mode lb_mode = {
.kind = "loadbalance",
.owner = THIS_MODULE,
.priv_size = sizeof(struct lb_priv),
+ .port_priv_size = sizeof(struct lb_port_priv),
.ops = &lb_mode_ops,
};
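
lb_get_skb_hash() above folds the 32-bit BPF filter result into one byte by XORing its four bytes; that byte then selects a port either modulo the enabled port count (the "hash" method) or through the 256-entry hash-to-port mapping (the "hash_to_port_mapping" method). A standalone sketch of the fold and table lookup, with hypothetical values; illustration only, not part of the patch.

/* Illustrative sketch only -- the byte-XOR fold and 256-entry lookup
 * used by the loadbalance mode above. */
#include <stdio.h>
#include <stdint.h>

#define HASHTABLE_SIZE 256	/* the fold below always yields 0..255 */

static unsigned char fold_hash(uint32_t h)
{
	unsigned char *c = (unsigned char *)&h;

	return c[0] ^ c[1] ^ c[2] ^ c[3];
}

int main(void)
{
	int hash_to_port[HASHTABLE_SIZE] = { 0 };	/* hash -> port index */
	uint32_t flow_hash = 0xdeadbeefu;		/* stands in for SK_RUN_FILTER() */
	int bucket = fold_hash(flow_hash);
	int port_count = 3;

	hash_to_port[bucket] = 2;	/* e.g. set via lb_tx_hash_to_port_mapping */

	printf("bucket %d -> port %d (hash method would pick %d)\n",
	       bucket, hash_to_port[bucket], bucket % port_count);
	return 0;
}
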
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 6abfbdc..0cf38e9 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -1,5 +1,5 @@
/*
- * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team
+ * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -30,16 +30,16 @@ static struct team_port *__get_first_port_up(struct team *team,
{
struct team_port *cur;
- if (port->linkup)
+ if (team_port_txable(port))
return port;
cur = port;
 	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
-		if (cur->linkup)
+		if (team_port_txable(cur))
return cur;
list_for_each_entry_rcu(cur, &team->port_list, list) {
 		if (cur == port)
 			break;
-		if (cur->linkup)
+		if (team_port_txable(cur))
return cur;
}
return NULL;
@@ -81,7 +81,7 @@ static const struct team_mode_ops rr_mode_ops = {
.port_change_mac = rr_port_change_mac,
};
-static struct team_mode rr_mode = {
+static const struct team_mode rr_mode = {
.kind = "roundrobin",
.owner = THIS_MODULE,
.priv_size = sizeof(struct rr_priv),
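
__get_first_port_up() above walks the port list from the current round-robin position to the end and then wraps around from the head until it is back where it started. The same wrap-around search over a plain array, as a standalone sketch with hypothetical names; illustration only, not part of the patch.

/* Illustrative sketch only -- wrap-around search for the next usable
 * entry, starting at a given position, as the round-robin walk does. */
#include <stdio.h>
#include <stdbool.h>

static int next_usable(const bool *usable, int n, int start)
{
	int i;

	for (i = start; i < n; i++)	/* from start to the end of the list */
		if (usable[i])
			return i;
	for (i = 0; i < start; i++)	/* wrap around from the head */
		if (usable[i])
			return i;
	return -1;			/* nothing transmittable */
}

int main(void)
{
	bool up[] = { false, true, false, true };

	printf("next from 2 -> %d\n", next_usable(up, 4, 2));	/* 3 */
	printf("next from 0 -> %d\n", next_usable(up, 4, 0));	/* 1 */
	return 0;
}
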
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 3ae80ec..6564c32 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -358,14 +358,30 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
 	padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
-	if ((!skb_cloned(skb)) &&
-	    ((headroom + tailroom) >= (4 + padlen))) {
-		if ((headroom < 4) || (tailroom < padlen)) {
+	/* We need to push 4 bytes in front of the frame (packet_len)
+	 * and maybe add 4 bytes after the end (if padlen is 4).
+	 *
+	 * Avoid the expensive skb_copy_expand() call, using the following rules:
+	 * - We are allowed to push 4 bytes into the headroom if
+	 *   skb_header_cloned() is false (and if we have 4 bytes of headroom)
+	 * - We are allowed to put 4 bytes at the tail if skb_cloned()
+	 *   is false (and if we have 4 bytes of tailroom)
+	 *
+	 * TCP packets, for example, are cloned, but skb_header_release()
+	 * was called in the TCP stack, allowing us to use the headroom.
+	 */
+ if (!skb_header_cloned(skb) &&
+ !(padlen && skb_cloned(skb)) &&
+ headroom + tailroom >= 4 + padlen) {
+ /* following should not happen, but better be safe */
+ if (headroom < 4 ||
+ tailroom < padlen) {
skb->data = memmove(skb->head + 4, skb->data, skb->len);
skb_set_tail_pointer(skb, skb->len);
}
} else {
struct sk_buff *skb2;
+
skb2 = skb_copy_expand(skb, 4, padlen, flags);
dev_kfree_skb_any(skb);
skb = skb2;
@@ -373,8 +389,8 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
return NULL;
}
+ packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
skb_push(skb, 4);
- packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
cpu_to_le32s(&packet_len);
skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
@@ -880,6 +896,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &ax88172_netdev_ops;
dev->net->ethtool_ops = &ax88172_ethtool_ops;
+ dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
+ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
@@ -1075,6 +1093,8 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &ax88772_netdev_ops;
dev->net->ethtool_ops = &ax88772_ethtool_ops;
+ dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
+ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
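
The reordered asix_tx_fixup() hunk above computes the 4-byte transmit header before skb_push(), while skb->len is still the payload length: the low 16 bits carry the length and the high 16 bits carry the same length XORed with 0xffff (the value is then converted to little-endian). A standalone check of that encoding; illustration only, not part of the patch.

/* Illustrative sketch only -- the 32-bit tx header layout used above:
 * low 16 bits = payload length, high 16 bits = length ^ 0xffff. */
#include <stdio.h>
#include <stdint.h>

static uint32_t asix_tx_header(uint16_t payload_len)
{
	return ((uint32_t)(payload_len ^ 0x0000ffff) << 16) + payload_len;
}

int main(void)
{
	uint16_t len = 1514;
	uint32_t hdr = asix_tx_header(len);

	/* the two halves are bitwise complements of each other */
	printf("len=%u header=0x%08x check=%s\n", (unsigned)len, (unsigned)hdr,
	       ((hdr & 0xffff) ^ (hdr >> 16)) == 0xffff ? "ok" : "bad");
	return 0;
}
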
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index d848d4d..187c144 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -394,7 +394,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
SET_NETDEV_DEV(dev, &intf->dev);
pnd->dev = dev;
- pnd->usb = usb_get_dev(usbdev);
+ pnd->usb = usbdev;
pnd->intf = intf;
pnd->data_intf = data_intf;
spin_lock_init(&pnd->tx_lock);
@@ -440,7 +440,6 @@ out:
static void usbpn_disconnect(struct usb_interface *intf)
{
struct usbpn_dev *pnd = usb_get_intfdata(intf);
- struct usb_device *usb = pnd->usb;
if (pnd->disconnected)
return;
@@ -449,7 +448,6 @@ static void usbpn_disconnect(struct usb_interface *intf)
usb_driver_release_interface(&usbpn_driver,
(pnd->intf == intf) ? pnd->data_intf : pnd->intf);
unregister_netdev(pnd->dev);
- usb_put_dev(usb);
}
static struct usb_driver usbpn_driver = {
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 964031e..a28a983d 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
#define USB_PRODUCT_IPHONE_4 0x1297
+#define USB_PRODUCT_IPAD 0x129a
#define USB_PRODUCT_IPHONE_4_VZW 0x129c
#define USB_PRODUCT_IPHONE_4S 0x12a0
@@ -101,6 +102,10 @@ static struct usb_device_id ipheth_table[] = {
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
{ USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 7023220..a0b5807 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1329,8 +1329,6 @@ static int pegasus_probe(struct usb_interface *intf,
}
pegasus_count++;
- usb_get_dev(dev);
-
net = alloc_etherdev(sizeof(struct pegasus));
if (!net)
goto out;
@@ -1407,7 +1405,6 @@ out2:
out1:
free_netdev(net);
out:
- usb_put_dev(dev);
pegasus_dec_workqueue();
return res;
}
@@ -1425,7 +1422,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
pegasus->flags |= PEGASUS_UNPLUG;
cancel_delayed_work(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
- usb_put_dev(interface_to_usbdev(intf));
unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
free_skb_pool(pegasus);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3b20678..85c983d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1,6 +1,10 @@
/*
* Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
*
+ * The probing code is heavily inspired by cdc_ether, which is:
+ * Copyright (C) 2003-2005 by David Brownell
+ * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@@ -15,11 +19,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
-/* The name of the CDC Device Management driver */
-#define DM_DRIVER "cdc_wdm"
-
-/*
- * This driver supports wwan (3G/LTE/?) devices using a vendor
+/* This driver supports wwan (3G/LTE/?) devices using a vendor
* specific management protocol called Qualcomm MSM Interface (QMI) -
* in addition to the more common AT commands over serial interface
* management
@@ -31,59 +31,117 @@
* management protocol is used in place of the standard CDC
* notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
*
+ * Alternatively, control and data functions can be combined in a
+ * single USB interface.
+ *
* Handling a protocol like QMI is out of the scope for any driver.
- * It can be exported as a character device using the cdc-wdm driver,
- * which will enable userspace applications ("modem managers") to
- * handle it. This may be required to use the network interface
- * provided by the driver.
+ * It is exported as a character device using the cdc-wdm driver as
+ * a subdriver, enabling userspace applications ("modem managers") to
+ * handle it.
*
* These devices may alternatively/additionally be configured using AT
- * commands on any of the serial interfaces driven by the option driver
- *
- * This driver binds only to the data ("slave") interface to enable
- * the cdc-wdm driver to bind to the control interface. It still
- * parses the CDC functional descriptors on the control interface to
- * a) verify that this is indeed a handled interface (CDC Union
- * header lists it as slave)
- * b) get MAC address and other ethernet config from the CDC Ethernet
- * header
- * c) enable user bind requests against the control interface, which
- * is the common way to bind to CDC Ethernet Control Model type
- * interfaces
- * d) provide a hint to the user about which interface is the
- * corresponding management interface
+ * commands on a serial interface
*/
+/* driver specific data */
+struct qmi_wwan_state {
+ struct usb_driver *subdriver;
+ atomic_t pmcount;
+ unsigned long unused;
+ struct usb_interface *control;
+ struct usb_interface *data;
+};
+
+/* using a counter to merge subdriver requests with our own into a combined state */
+static int qmi_wwan_manage_power(struct usbnet *dev, int on)
+{
+ struct qmi_wwan_state *info = (void *)&dev->data;
+ int rv = 0;
+
+ dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+
+ if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
+ /* need autopm_get/put here to ensure the usbcore sees the new value */
+ rv = usb_autopm_get_interface(dev->intf);
+ if (rv < 0)
+ goto err;
+ dev->intf->needs_remote_wakeup = on;
+ usb_autopm_put_interface(dev->intf);
+ }
+err:
+ return rv;
+}
+
+static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ /* can be called while disconnecting */
+ if (!dev)
+ return 0;
+ return qmi_wwan_manage_power(dev, on);
+}
+
+/* collect all three endpoints and register subdriver */
+static int qmi_wwan_register_subdriver(struct usbnet *dev)
+{
+ int rv;
+ struct usb_driver *subdriver = NULL;
+ struct qmi_wwan_state *info = (void *)&dev->data;
+
+ /* collect bulk endpoints */
+ rv = usbnet_get_endpoints(dev, info->data);
+ if (rv < 0)
+ goto err;
+
+ /* update status endpoint if separate control interface */
+ if (info->control != info->data)
+ dev->status = &info->control->cur_altsetting->endpoint[0];
+
+ /* require interrupt endpoint for subdriver */
+ if (!dev->status) {
+ rv = -EINVAL;
+ goto err;
+ }
+
+ /* for subdriver power management */
+ atomic_set(&info->pmcount, 0);
+
+ /* register subdriver */
+ subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
+ if (IS_ERR(subdriver)) {
+ dev_err(&info->control->dev, "subdriver registration failed\n");
+ rv = PTR_ERR(subdriver);
+ goto err;
+ }
+
+ /* prevent usbnet from using status endpoint */
+ dev->status = NULL;
+
+ /* save subdriver struct for suspend/resume wrappers */
+ info->subdriver = subdriver;
+
+err:
+ return rv;
+}
+
static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status = -1;
- struct usb_interface *control = NULL;
u8 *buf = intf->cur_altsetting->extra;
int len = intf->cur_altsetting->extralen;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
struct usb_cdc_union_desc *cdc_union = NULL;
struct usb_cdc_ether_desc *cdc_ether = NULL;
- u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE;
u32 found = 0;
- atomic_t *pmcount = (void *)&dev->data[1];
+ struct usb_driver *driver = driver_of(intf);
+ struct qmi_wwan_state *info = (void *)&dev->data;
- atomic_set(pmcount, 0);
+ BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
- /*
- * assume a data interface has no additional descriptors and
- * that the control and data interface are numbered
- * consecutively - this holds for the Huawei device at least
- */
- if (len == 0 && desc->bInterfaceNumber > 0) {
- control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1);
- if (!control)
- goto err;
-
- buf = control->cur_altsetting->extra;
- len = control->cur_altsetting->extralen;
- dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n",
- dev_name(&control->dev));
- }
+ /* require a single interrupt status endpoint for subdriver */
+ if (intf->cur_altsetting->desc.bNumEndpoints != 1)
+ goto err;
while (len > 3) {
struct usb_descriptor_header *h = (void *)buf;
@@ -142,15 +200,23 @@ next_desc:
}
/* did we find all the required ones? */
- if ((found & required) != required) {
+ if (!(found & (1 << USB_CDC_HEADER_TYPE)) ||
+ !(found & (1 << USB_CDC_UNION_TYPE))) {
dev_err(&intf->dev, "CDC functional descriptors missing\n");
goto err;
}
- /* give the user a helpful hint if trying to bind to the wrong interface */
- if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) {
- dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n",
- dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev));
+ /* verify CDC Union */
+ if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
+ dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
+ goto err;
+ }
+
+ /* need to save these for unbind */
+ info->control = intf;
+ info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
+ if (!info->data) {
+ dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
goto err;
}
@@ -160,59 +226,29 @@ next_desc:
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
}
- /* success! point the user to the management interface */
- if (control)
- dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n",
- dev_name(&control->dev));
-
- /* XXX: add a sysfs symlink somewhere to help management applications find it? */
+ /* claim data interface and set it up */
+ status = usb_driver_claim_interface(driver, info->data, dev);
+ if (status < 0)
+ goto err;
- /* collect bulk endpoints now that we know intf == "data" interface */
- status = usbnet_get_endpoints(dev, intf);
+ status = qmi_wwan_register_subdriver(dev);
+ if (status < 0) {
+ usb_set_intfdata(info->data, NULL);
+ usb_driver_release_interface(driver, info->data);
+ }
err:
return status;
}
-/* using a counter to merge subdriver requests with our own into a combined state */
-static int qmi_wwan_manage_power(struct usbnet *dev, int on)
-{
- atomic_t *pmcount = (void *)&dev->data[1];
- int rv = 0;
-
- dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(pmcount), on);
-
- if ((on && atomic_add_return(1, pmcount) == 1) || (!on && atomic_dec_and_test(pmcount))) {
- /* need autopm_get/put here to ensure the usbcore sees the new value */
- rv = usb_autopm_get_interface(dev->intf);
- if (rv < 0)
- goto err;
- dev->intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(dev->intf);
- }
-err:
- return rv;
-}
-
-static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
-{
- struct usbnet *dev = usb_get_intfdata(intf);
- return qmi_wwan_manage_power(dev, on);
-}
-
/* Some devices combine the "control" and "data" functions into a
* single interface with all three endpoints: interrupt + bulk in and
* out
- *
- * Setting up cdc-wdm as a subdriver owning the interrupt endpoint
- * will let it provide userspace access to the encapsulated QMI
- * protocol without interfering with the usbnet operations.
- */
+ */
static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
{
int rv;
- struct usb_driver *subdriver = NULL;
- atomic_t *pmcount = (void *)&dev->data[1];
+ struct qmi_wwan_state *info = (void *)&dev->data;
/* ZTE makes devices where the interface descriptors and endpoint
* configurations of two or more interfaces are identical, even
@@ -228,66 +264,39 @@ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
- atomic_set(pmcount, 0);
-
- /* collect all three endpoints */
- rv = usbnet_get_endpoints(dev, intf);
- if (rv < 0)
- goto err;
-
- /* require interrupt endpoint for subdriver */
- if (!dev->status) {
- rv = -EINVAL;
- goto err;
- }
-
- subdriver = usb_cdc_wdm_register(intf, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
- if (IS_ERR(subdriver)) {
- rv = PTR_ERR(subdriver);
- goto err;
- }
-
- /* can't let usbnet use the interrupt endpoint */
- dev->status = NULL;
-
- /* save subdriver struct for suspend/resume wrappers */
- dev->data[0] = (unsigned long)subdriver;
-
-err:
- return rv;
-}
-
-/* Gobi devices uses identical class/protocol codes for all interfaces regardless
- * of function. Some of these are CDC ACM like and have the exact same endpoints
- * we are looking for. This leaves two possible strategies for identifying the
- * correct interface:
- * a) hardcoding interface number, or
- * b) use the fact that the wwan interface is the only one lacking additional
- * (CDC functional) descriptors
- *
- * Let's see if we can get away with the generic b) solution.
- */
-static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf)
-{
- int rv = -EINVAL;
-
- /* ignore any interface with additional descriptors */
- if (intf->cur_altsetting->extralen)
- goto err;
+ /* control and data is shared */
+ info->control = intf;
+ info->data = intf;
+ rv = qmi_wwan_register_subdriver(dev);
- rv = qmi_wwan_bind_shared(dev, intf);
err:
return rv;
}
-static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
+static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
{
- struct usb_driver *subdriver = (void *)dev->data[0];
-
- if (subdriver && subdriver->disconnect)
- subdriver->disconnect(intf);
+ struct qmi_wwan_state *info = (void *)&dev->data;
+ struct usb_driver *driver = driver_of(intf);
+ struct usb_interface *other;
+
+ if (info->subdriver && info->subdriver->disconnect)
+ info->subdriver->disconnect(info->control);
+
+ /* allow user to unbind using either control or data */
+ if (intf == info->control)
+ other = info->data;
+ else
+ other = info->control;
+
+ /* only if not shared */
+ if (other && intf != other) {
+ usb_set_intfdata(other, NULL);
+ usb_driver_release_interface(driver, other);
+ }
- dev->data[0] = (unsigned long)NULL;
+ info->subdriver = NULL;
+ info->data = NULL;
+ info->control = NULL;
}
/* suspend/resume wrappers calling both usbnet and the cdc-wdm
@@ -299,15 +308,15 @@ static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *int
static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct usb_driver *subdriver = (void *)dev->data[0];
+ struct qmi_wwan_state *info = (void *)&dev->data;
int ret;
ret = usbnet_suspend(intf, message);
if (ret < 0)
goto err;
- if (subdriver && subdriver->suspend)
- ret = subdriver->suspend(intf, message);
+ if (info->subdriver && info->subdriver->suspend)
+ ret = info->subdriver->suspend(intf, message);
if (ret < 0)
usbnet_resume(intf);
err:
@@ -317,59 +326,77 @@ err:
static int qmi_wwan_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct usb_driver *subdriver = (void *)dev->data[0];
+ struct qmi_wwan_state *info = (void *)&dev->data;
int ret = 0;
- if (subdriver && subdriver->resume)
- ret = subdriver->resume(intf);
+ if (info->subdriver && info->subdriver->resume)
+ ret = info->subdriver->resume(intf);
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
- if (ret < 0 && subdriver && subdriver->resume && subdriver->suspend)
- subdriver->suspend(intf, PMSG_SUSPEND);
+ if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend)
+ info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
}
-
static const struct driver_info qmi_wwan_info = {
- .description = "QMI speaking wwan device",
+ .description = "WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
};
static const struct driver_info qmi_wwan_shared = {
- .description = "QMI speaking wwan device with combined interface",
+ .description = "WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
};
-static const struct driver_info qmi_wwan_gobi = {
- .description = "Qualcomm Gobi wwan/QMI device",
+static const struct driver_info qmi_wwan_force_int0 = {
+ .description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_gobi,
- .unbind = qmi_wwan_unbind_shared,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
+ .data = BIT(0), /* interface whitelist bitmap */
};
-/* ZTE suck at making USB descriptors */
static const struct driver_info qmi_wwan_force_int1 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(1), /* interface whitelist bitmap */
};
+static const struct driver_info qmi_wwan_force_int2 = {
+ .description = "Qualcomm WWAN/QMI device",
+ .flags = FLAG_WWAN,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind,
+ .manage_power = qmi_wwan_manage_power,
+ .data = BIT(2), /* interface whitelist bitmap */
+};
+
+static const struct driver_info qmi_wwan_force_int3 = {
+ .description = "Qualcomm WWAN/QMI device",
+ .flags = FLAG_WWAN,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind,
+ .manage_power = qmi_wwan_manage_power,
+ .data = BIT(3), /* interface whitelist bitmap */
+};
+
static const struct driver_info qmi_wwan_force_int4 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(4), /* interface whitelist bitmap */
};
@@ -390,16 +417,23 @@ static const struct driver_info qmi_wwan_force_int4 = {
static const struct driver_info qmi_wwan_sierra = {
.description = "Sierra Wireless wwan/QMI device",
.flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_gobi,
- .unbind = qmi_wwan_unbind_shared,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(8) | BIT(19), /* interface whitelist bitmap */
};
#define HUAWEI_VENDOR_ID 0x12D1
+
+/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
+#define QMI_GOBI1K_DEVICE(vend, prod) \
+ USB_DEVICE(vend, prod), \
+ .driver_info = (unsigned long)&qmi_wwan_force_int3
+
+/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
#define QMI_GOBI_DEVICE(vend, prod) \
USB_DEVICE(vend, prod), \
- .driver_info = (unsigned long)&qmi_wwan_gobi
+ .driver_info = (unsigned long)&qmi_wwan_force_int0
static const struct usb_device_id products[] = {
{ /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -407,7 +441,7 @@ static const struct usb_device_id products[] = {
.idVendor = HUAWEI_VENDOR_ID,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */
+ .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
@@ -415,7 +449,7 @@ static const struct usb_device_id products[] = {
.idVendor = HUAWEI_VENDOR_ID,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */
+ .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Huawei E392, E398 and possibly others in "Windows mode"
@@ -501,6 +535,15 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
+ { /* ZTE MF60 */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x1402,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int2,
+ },
{ /* Sierra Wireless MC77xx in QMI mode */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x1199,
@@ -510,20 +553,24 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_sierra,
},
- {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
- {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
- {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
- {QMI_GOBI_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
- {QMI_GOBI_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
- {QMI_GOBI_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
- {QMI_GOBI_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
+
+ /* Gobi 1000 devices */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+ {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
+ {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
+ {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
+
+ /* Gobi 2000 and 3000 devices */
{QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
@@ -565,17 +612,7 @@ static struct usb_driver qmi_wwan_driver = {
.disable_hub_initiated_lpm = 1,
};
-static int __init qmi_wwan_init(void)
-{
- return usb_register(&qmi_wwan_driver);
-}
-module_init(qmi_wwan_init);
-
-static void __exit qmi_wwan_exit(void)
-{
- usb_deregister(&qmi_wwan_driver);
-}
-module_exit(qmi_wwan_exit);
+module_usb_driver(qmi_wwan_driver);
MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
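
qmi_wwan_manage_power() above merges wakeup requests from usbnet and the cdc-wdm subdriver through one atomic counter: remote wakeup is enabled on the 0-to-1 transition and disabled on the 1-to-0 transition. A user-space sketch of that transition-detection idiom using C11 atomics, with hypothetical names; illustration only, not part of the patch.

/* Illustrative sketch only -- enable a shared resource for the first user,
 * disable it when the last user goes away, as qmi_wwan_manage_power() does. */
#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int pmcount;

static void set_remote_wakeup(bool on)
{
	printf("remote wakeup -> %s\n", on ? "on" : "off");
}

static void manage_power(bool on)
{
	if (on) {
		/* 0 -> 1 transition: first user, enable */
		if (atomic_fetch_add(&pmcount, 1) + 1 == 1)
			set_remote_wakeup(true);
	} else {
		/* 1 -> 0 transition: last user gone, disable */
		if (atomic_fetch_sub(&pmcount, 1) - 1 == 0)
			set_remote_wakeup(false);
	}
}

int main(void)
{
	manage_power(true);	/* usbnet needs wakeup: enables it */
	manage_power(true);	/* subdriver too: no state change */
	manage_power(false);	/* subdriver done: still one user */
	manage_power(false);	/* last user gone: disables wakeup */
	return 0;
}
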
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 3faef56..d75d1f5 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
}
static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
.rx_urb_size = 8 * 1024,
.whitelist = {
.infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
}
};
-static const struct driver_info sierra_net_info_68A3 = {
+static const struct driver_info sierra_net_info_direct_ip = {
.description = "Sierra Wireless USB-to-WWAN Modem",
.flags = FLAG_WWAN | FLAG_SEND_ZLP,
.bind = sierra_net_bind,
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = {
.status = sierra_net_status,
.rx_fixup = sierra_net_rx_fixup,
.tx_fixup = sierra_net_tx_fixup,
- .data = (unsigned long)&sierra_net_info_data_68A3,
+ .data = (unsigned long)&sierra_net_info_data_direct_ip,
};
static const struct usb_device_id products[] = {
{USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
- .driver_info = (unsigned long) &sierra_net_info_68A3},
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
{}, /* last item */
};
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index b1112e7..bd7cbaa 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -578,6 +578,36 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data);
}
+static int smsc95xx_ethtool_getregslen(struct net_device *netdev)
+{
+ /* all smsc95xx registers */
+ return COE_CR - ID_REV + 1;
+}
+
+static void
+smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ unsigned int i, j;
+ int retval;
+ u32 *data = buf;
+
+ retval = smsc95xx_read_reg(dev, ID_REV, &regs->version);
+ if (retval < 0) {
+ netdev_warn(netdev, "REGS: cannot read ID_REV\n");
+ return;
+ }
+
+ for (i = ID_REV, j = 0; i <= COE_CR; i += (sizeof(u32)), j++) {
+ retval = smsc95xx_read_reg(dev, i, &data[j]);
+ if (retval < 0) {
+ netdev_warn(netdev, "REGS: cannot read reg[%x]\n", i);
+ return;
+ }
+ }
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
@@ -589,6 +619,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
.get_eeprom = smsc95xx_ethtool_get_eeprom,
.set_eeprom = smsc95xx_ethtool_set_eeprom,
+ .get_regs_len = smsc95xx_ethtool_getregslen,
+ .get_regs = smsc95xx_ethtool_getregs,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9f58330..e92c057 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -180,7 +180,40 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
-static void intr_complete (struct urb *urb);
+static void intr_complete (struct urb *urb)
+{
+ struct usbnet *dev = urb->context;
+ int status = urb->status;
+
+ switch (status) {
+ /* success */
+ case 0:
+ dev->driver_info->status(dev, urb);
+ break;
+
+ /* software-driven interface shutdown */
+ case -ENOENT: /* urb killed */
+ case -ESHUTDOWN: /* hardware gone */
+ netif_dbg(dev, ifdown, dev->net,
+ "intr shutdown, code %d\n", status);
+ return;
+
+ /* NOTE: not throttling like RX/TX, since this endpoint
+ * already polls infrequently
+ */
+ default:
+ netdev_dbg(dev->net, "intr status %d\n", status);
+ break;
+ }
+
+ if (!netif_running (dev->net))
+ return;
+
+ status = usb_submit_urb (urb, GFP_ATOMIC);
+ if (status != 0)
+ netif_err(dev, timer, dev->net,
+ "intr resubmit --> %d\n", status);
+}
static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
@@ -519,42 +552,6 @@ block:
netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
-static void intr_complete (struct urb *urb)
-{
- struct usbnet *dev = urb->context;
- int status = urb->status;
-
- switch (status) {
- /* success */
- case 0:
- dev->driver_info->status(dev, urb);
- break;
-
- /* software-driven interface shutdown */
- case -ENOENT: /* urb killed */
- case -ESHUTDOWN: /* hardware gone */
- netif_dbg(dev, ifdown, dev->net,
- "intr shutdown, code %d\n", status);
- return;
-
- /* NOTE: not throttling like RX/TX, since this endpoint
- * already polls infrequently
- */
- default:
- netdev_dbg(dev->net, "intr status %d\n", status);
- break;
- }
-
- if (!netif_running (dev->net))
- return;
-
- memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
- status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status != 0)
- netif_err(dev, timer, dev->net,
- "intr resubmit --> %d\n", status);
-}
-
/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
@@ -796,11 +793,13 @@ int usbnet_open (struct net_device *net)
if (info->manage_power) {
retval = info->manage_power(dev, 1);
if (retval < 0)
- goto done;
+ goto done_manage_power_error;
usb_autopm_put_interface(dev->intf);
}
return retval;
+done_manage_power_error:
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
done:
usb_autopm_put_interface(dev->intf);
done_nopm:
@@ -876,9 +875,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
- strncpy (info->driver, dev->driver_name, sizeof info->driver);
- strncpy (info->version, DRIVER_VERSION, sizeof info->version);
- strncpy (info->fw_version, dev->driver_info->description,
+ strlcpy (info->driver, dev->driver_name, sizeof info->driver);
+ strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
+ strlcpy (info->fw_version, dev->driver_info->description,
sizeof info->fw_version);
usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
@@ -1202,6 +1201,21 @@ deferred:
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);
+static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
+{
+ struct urb *urb;
+ int i;
+
+ /* don't refill the queue all at once */
+ for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
+ urb = usb_alloc_urb(0, flags);
+ if (urb != NULL) {
+ if (rx_submit(dev, urb, flags) == -ENOLINK)
+ return;
+ }
+ }
+}
+
/*-------------------------------------------------------------------------*/
// tasklet (work deferred from completions, in_irq) or timer
@@ -1241,26 +1255,14 @@ static void usbnet_bh (unsigned long param)
!timer_pending (&dev->delay) &&
!test_bit (EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
- int qlen = RX_QLEN (dev);
-
- if (temp < qlen) {
- struct urb *urb;
- int i;
-
- // don't refill the queue all at once
- for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
- urb = usb_alloc_urb (0, GFP_ATOMIC);
- if (urb != NULL) {
- if (rx_submit (dev, urb, GFP_ATOMIC) ==
- -ENOLINK)
- return;
- }
- }
+
+ if (temp < RX_QLEN(dev)) {
+ rx_alloc_submit(dev, GFP_ATOMIC);
if (temp != dev->rxq.qlen)
netif_dbg(dev, link, dev->net,
"rxqlen %d --> %d\n",
temp, dev->rxq.qlen);
- if (dev->rxq.qlen < qlen)
+ if (dev->rxq.qlen < RX_QLEN(dev))
tasklet_schedule (&dev->bh);
}
if (dev->txq.qlen < TX_QLEN (dev))
@@ -1307,7 +1309,6 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
free_netdev(net);
- usb_put_dev (xdev);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1363,8 +1364,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
xdev = interface_to_usbdev (udev);
interface = udev->cur_altsetting;
- usb_get_dev (xdev);
-
status = -ENOMEM;
// set up our own records
@@ -1493,7 +1492,6 @@ out3:
out1:
free_netdev(net);
out:
- usb_put_dev(xdev);
return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);
@@ -1513,6 +1511,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
spin_lock_irq(&dev->txq.lock);
/* don't autosuspend while transmitting */
if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
+ dev->suspend_count--;
spin_unlock_irq(&dev->txq.lock);
return -EBUSY;
} else {
@@ -1569,6 +1568,13 @@ int usbnet_resume (struct usb_interface *intf)
spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+ /* handle remote wakeup ASAP */
+ if (!dev->wait &&
+ netif_device_present(dev->net) &&
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags))
+ rx_alloc_submit(dev, GFP_KERNEL);
+
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
tasklet_schedule (&dev->bh);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5214b1e..1db445b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
- struct u64_stats_sync syncp;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
u64 tx_bytes;
u64 tx_packets;
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
hdr = skb_vnet_hdr(skb);
- u64_stats_update_begin(&stats->syncp);
+ u64_stats_update_begin(&stats->rx_syncp);
stats->rx_bytes += skb->len;
stats->rx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_end(&stats->rx_syncp);
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- u64_stats_update_begin(&stats->syncp);
+ u64_stats_update_begin(&stats->tx_syncp);
stats->tx_bytes += skb->len;
stats->tx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_end(&stats->tx_syncp);
tot_sgs += skb_vnet_hdr(skb)->num_sg;
dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
u64 tpackets, tbytes, rpackets, rbytes;
do {
- start = u64_stats_fetch_begin(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->tx_syncp);
tpackets = stats->tx_packets;
tbytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin(&stats->rx_syncp);
rpackets = stats->rx_packets;
rbytes = stats->rx_bytes;
- } while (u64_stats_fetch_retry(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
@@ -1057,7 +1062,7 @@ static int virtnet_probe(struct virtio_device *vdev)
return -ENOMEM;
/* Set up network device as normal. */
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f04ba0..93e0cfb 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1037,7 +1037,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
#endif
dev_dbg(&adapter->netdev->dev,
"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
- (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
+ (u32)(ctx.sop_txd -
tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d7a65e1..44db8b7 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -231,7 +231,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
}
p = icp;
- count = x25_asy_esc(p, (unsigned char *) sl->xbuff, len);
+ count = x25_asy_esc(p, sl->xbuff, len);
/* Order of next two lines is *very* important.
* When we are sending a little amount of data,
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 7632f8c..283237f 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1267,7 +1267,7 @@ int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
size_t leftover, offset, header_len, size;
leftover = top - itr;
- offset = itr - (const void *) bcf;
+ offset = itr - bcf;
if (leftover <= sizeof(*bcf_hdr)) {
dev_err(dev, "firmware %s: %zu B left at @%zx, "
"not enough for BCF header\n",
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 0ac09a2..97afcec 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1738,8 +1738,7 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
return -ENOMEM;
}
- priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring +
- priv->rx_ring_size);
+ priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
priv->tx_ring_dma = priv->rx_ring_dma +
sizeof(struct adm8211_desc) * priv->rx_ring_size;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a747c63..f9f15bb 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1997,7 +1997,7 @@ static int mpi_send_packet (struct net_device *dev)
* ------------------------------------------------
*/
- memcpy((char *)ai->txfids[0].virtual_host_addr,
+ memcpy(ai->txfids[0].virtual_host_addr,
(char *)&wifictlhdr8023, sizeof(wifictlhdr8023));
payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr +
@@ -4212,7 +4212,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
rc = -1;
} else {
- memcpy((char *)ai->config_desc.virtual_host_addr,
+ memcpy(ai->config_desc.virtual_host_addr,
pBuf, len);
rc = issuecommand(ai, &cmd, &rsp);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 25de6f9..ab2bfcb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3178,7 +3178,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
mdata_size, length);
return -1;
}
- memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
+ memcpy(mptr, word + COMP_HDR_LEN, length);
ath_dbg(common, EEPROM,
"restored eeprom %d: uncompressed, length %d\n",
it, length);
@@ -3199,7 +3199,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
"restore eeprom %d: block, reference %d, length %d\n",
it, reference, length);
ar9300_uncompress_block(ah, mptr, mdata_size,
- (u8 *) (word + COMP_HDR_LEN), length);
+ (word + COMP_HDR_LEN), length);
break;
default:
ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index a850f44..7d07510 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -188,8 +188,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
{
#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
struct ath_common *common = ath9k_hw_common(ah);
- struct ar5416_eeprom_4k *eep =
- (struct ar5416_eeprom_4k *) &ah->eeprom.map4k;
+ struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
u16 *eepdata, temp, magic, magic2;
u32 sum = 0, el;
bool need_swap = false;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 56290f3..a8ac30a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -264,8 +264,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
{
- struct ar5416_eeprom_def *eep =
- (struct ar5416_eeprom_def *) &ah->eeprom.def;
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
struct ath_common *common = ath9k_hw_common(ah);
u16 *eepdata, temp, magic, magic2;
u32 sum = 0, el;
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index 195dc65..39a6387 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -138,7 +138,7 @@ int carl9170_reboot(struct ar9170 *ar)
if (!cmd)
return -ENOMEM;
- err = __carl9170_exec_cmd(ar, (struct carl9170_cmd *)cmd, true);
+ err = __carl9170_exec_cmd(ar, cmd, true);
return err;
}
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 25910a1..6f6a341 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -161,7 +161,7 @@ static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer)
void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
{
- struct carl9170_rsp *cmd = (void *) buf;
+ struct carl9170_rsp *cmd = buf;
struct ieee80211_vif *vif;
if (carl9170_check_sequence(ar, cmd->hdr.seq))
@@ -520,7 +520,7 @@ static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie)
*/
static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
{
- struct ieee80211_hdr *hdr = (void *) data;
+ struct ieee80211_hdr *hdr = data;
struct ieee80211_tim_ie *tim_ie;
u8 *tim;
u8 tim_len;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index d07c030..4a4e98f 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2952,10 +2952,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
/* current AP address - only in reassoc frame */
if (is_reassoc) {
memcpy(body.ap, priv->CurrentBSSID, 6);
- ssid_el_p = (u8 *)&body.ssid_el_id;
+ ssid_el_p = &body.ssid_el_id;
bodysize = 18 + priv->SSID_size;
} else {
- ssid_el_p = (u8 *)&body.ap[0];
+ ssid_el_p = &body.ap[0];
bodysize = 12 + priv->SSID_size;
}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index acd03a4..1b988f2 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3767,7 +3767,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
if (prev_status >= B43_STAT_STARTED) {
err = b43_wireless_core_start(up_dev);
if (err) {
- b43err(wl, "Fatal: Coult not start device for "
+ b43err(wl, "Fatal: Could not start device for "
"selected %s-GHz band\n",
band_to_string(chan->band));
b43_wireless_core_exit(up_dev);
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index f1f8bd0..2d3c664 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -52,7 +52,7 @@ struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
desc = ring->descbase;
desc = &(desc[slot]);
- return (struct b43legacy_dmadesc32 *)desc;
+ return desc;
}
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
- bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 8b06ca5..8156135 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2633,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
if (prev_status >= B43legacy_STAT_STARTED) {
err = b43legacy_wireless_core_start(up_dev);
if (err) {
- b43legacyerr(wl, "Fatal: Coult not start device for "
+ b43legacyerr(wl, "Fatal: Could not start device for "
"newly selected %s-PHY mode\n",
phymode_to_string(new_mode));
b43legacy_wireless_core_exit(up_dev);
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index a8012f2..b8ffea6 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -269,8 +269,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->plcp), plcp_fragment_len,
rate);
- b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
- (&txhdr->plcp_fb), plcp_fragment_len,
+ b43legacy_generate_plcp_hdr(&txhdr->plcp_fb, plcp_fragment_len,
rate_fb->hw_value);
/* PHY TX Control word */
@@ -340,8 +339,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->rts_plcp),
len, rts_rate);
- b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
- (&txhdr->rts_plcp_fb),
+ b43legacy_generate_plcp_hdr(&txhdr->rts_plcp_fb,
len, rts_rate_fb);
hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame);
txhdr->rts_dur_fb = hdr->duration_id;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 5c868ba..472f2ef 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -2501,7 +2501,7 @@ clkwait:
int ret, i;
ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
+ SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
(u32) bus->ctrl_frame_len);
if (ret < 0) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index f64c5cf..5e53305 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -1438,7 +1438,7 @@ void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
struct ieee80211_tx_info *tx_info;
while (i != end) {
- skb = (struct sk_buff *)di->txp[i];
+ skb = di->txp[i];
if (skb != NULL) {
tx_info = (struct ieee80211_tx_info *)skb->cb;
(callback_fnc)(tx_info, arg_a);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index d39f7d0..cb73f22 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -8341,7 +8341,7 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
struct brcms_pub *pub;
/* allocate struct brcms_c_info state and its substructures */
- wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
+ wlc = brcms_c_attach_malloc(unit, &err, 0);
if (wlc == NULL)
goto fail;
wlc->wiphy = wl->wiphy;
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index 75ef8f0..dc447c1 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -58,8 +58,7 @@ static int prism2_stats_proc_read(char *page, char **start, off_t off,
{
char *p = page;
local_info_t *local = (local_info_t *) data;
- struct comm_tallies_sums *sums = (struct comm_tallies_sums *)
- &local->comm_tallies;
+ struct comm_tallies_sums *sums = &local->comm_tallies;
if (off != 0) {
*eof = 1;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 254b892..0df4591 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -7080,9 +7080,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
}
IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
- err = ipw_send_qos_params_command(priv,
- (struct libipw_qos_parameters *)
- &(qos_parameters[0]));
+ err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
if (err)
IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 203d575..09795af 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1364,7 +1364,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
DMA_BIDIRECTIONAL);
trace_iwlwifi_dev_tx(trans->dev,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ &txq->tfds[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&dev_cmd->hdr, firstlen,
skb->data + hdr_len, secondlen);
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a06cc28..668dd27 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -483,7 +483,7 @@ static ssize_t lbs_rdmac_write(struct file *file,
res = -EFAULT;
goto out_unlock;
}
- priv->mac_offset = simple_strtoul((char *)buf, NULL, 16);
+ priv->mac_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
@@ -565,7 +565,7 @@ static ssize_t lbs_rdbbp_write(struct file *file,
res = -EFAULT;
goto out_unlock;
}
- priv->bbp_offset = simple_strtoul((char *)buf, NULL, 16);
+ priv->bbp_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index cd3b0d4..64b7dc5 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -302,7 +302,7 @@ error:
static void if_usb_disconnect(struct usb_interface *intf)
{
struct if_usb_card *cardp = usb_get_intfdata(intf);
- struct lbs_private *priv = (struct lbs_private *) cardp->priv;
+ struct lbs_private *priv = cardp->priv;
lbs_deb_enter(LBS_DEB_MAIN);
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 19a5a92..d576dd6 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -253,7 +253,7 @@ lbtf_deb_leave(LBTF_DEB_MAIN);
static void if_usb_disconnect(struct usb_interface *intf)
{
struct if_usb_card *cardp = usb_get_intfdata(intf);
- struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;
+ struct lbtf_private *priv = cardp->priv;
lbtf_deb_enter(LBTF_DEB_MAIN);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3f38d84..200bcc0 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -571,7 +571,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
skb_dequeue(&data->pending);
}
- skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (skb == NULL)
goto nla_put_failure;
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index fe8ebfe..e535c93 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -101,8 +101,7 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
{
int tid;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
- struct host_cmd_ds_11n_delba *del_ba =
- (struct host_cmd_ds_11n_delba *) &resp->params.del_ba;
+ struct host_cmd_ds_11n_delba *del_ba = &resp->params.del_ba;
uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set);
tid = del_ba_param_set >> DELBA_TID_POS;
@@ -147,8 +146,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
int tid;
- struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
- (struct host_cmd_ds_11n_addba_rsp *) &resp->params.add_ba_rsp;
+ struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -412,7 +410,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
memcpy((u8 *) bss_co_2040 +
sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_bss_co_2040 +
+ bss_desc->bcn_bss_co_2040 +
sizeof(struct ieee_types_header),
le16_to_cpu(bss_co_2040->header.len));
@@ -426,10 +424,8 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
- memcpy((u8 *) ext_cap +
- sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ext_cap +
- sizeof(struct ieee_types_header),
+ memcpy((u8 *)ext_cap + sizeof(struct mwifiex_ie_types_header),
+ bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header),
le16_to_cpu(ext_cap->header.len));
*buffer += sizeof(struct mwifiex_ie_types_extcap);
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 77646d7..28366e9 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -105,8 +105,7 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
priv = adapter->priv[i];
if (priv)
ba_stream_num += mwifiex_wmm_list_len(
- (struct list_head *)
- &priv->tx_ba_stream_tbl_ptr);
+ &priv->tx_ba_stream_tbl_ptr);
}
return ((ba_stream_num <
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 900ee12..591ccd3 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -297,9 +297,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
*/
int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
{
- struct host_cmd_ds_11n_addba_req *add_ba_req =
- (struct host_cmd_ds_11n_addba_req *)
- &cmd->params.add_ba_req;
+ struct host_cmd_ds_11n_addba_req *add_ba_req = &cmd->params.add_ba_req;
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
@@ -321,9 +319,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
struct host_cmd_ds_11n_addba_req
*cmd_addba_req)
{
- struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
- (struct host_cmd_ds_11n_addba_rsp *)
- &cmd->params.add_ba_rsp;
+ struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
u8 tid;
int win_size;
uint16_t block_ack_param_set;
@@ -368,8 +364,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
*/
int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
{
- struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *)
- &cmd->params.del_ba;
+ struct host_cmd_ds_11n_delba *del_ba = &cmd->params.del_ba;
cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
@@ -399,8 +394,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
int start_win, end_win, win_size;
u16 pkt_index;
- tbl = mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv,
- tid, ta);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
if (pkt_type != PKT_TYPE_BAR)
mwifiex_process_rx_packet(priv->adapter, payload);
@@ -521,9 +515,7 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
- struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
- (struct host_cmd_ds_11n_addba_rsp *)
- &resp->params.add_ba_rsp;
+ struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 6c57e83..c7a177c 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1827,7 +1827,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wdev_priv = wiphy_priv(wiphy);
*(unsigned long *)wdev_priv = (unsigned long)adapter;
- set_wiphy_dev(wiphy, (struct device *)priv->adapter->dev);
+ set_wiphy_dev(wiphy, priv->adapter->dev);
ret = wiphy_register(wiphy);
if (ret < 0) {
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index d6b4fb0..82e63ce 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1349,22 +1349,16 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
{
u8 mac_address[ETH_ALEN];
int ret;
- u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
- if (mac) {
- if (!memcmp(mac, zero_mac, sizeof(zero_mac)))
- memcpy((u8 *) &mac_address,
- (u8 *) &priv->curr_bss_params.bss_descriptor.
- mac_address, ETH_ALEN);
- else
- memcpy((u8 *) &mac_address, (u8 *) mac, ETH_ALEN);
- } else {
- memcpy((u8 *) &mac_address, (u8 *) &priv->curr_bss_params.
- bss_descriptor.mac_address, ETH_ALEN);
- }
+ if (!mac || is_zero_ether_addr(mac))
+ memcpy(mac_address,
+ priv->curr_bss_params.bss_descriptor.mac_address,
+ ETH_ALEN);
+ else
+ memcpy(mac_address, mac, ETH_ALEN);
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
- HostCmd_ACT_GEN_SET, 0, &mac_address);
+ HostCmd_ACT_GEN_SET, 0, mac_address);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 884ed63..04dc7ca 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1034,14 +1034,12 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
case TLV_TYPE_TSFTIMESTAMP:
dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
"timestamp TLV, len = %d\n", tlv_len);
- *tlv_data = (struct mwifiex_ie_types_data *)
- current_tlv;
+ *tlv_data = current_tlv;
break;
case TLV_TYPE_CHANNELBANDLIST:
dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
" band list TLV, len = %d\n", tlv_len);
- *tlv_data = (struct mwifiex_ie_types_data *)
- current_tlv;
+ *tlv_data = current_tlv;
break;
default:
dev_err(adapter->dev,
@@ -1246,15 +1244,15 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->beacon_buf);
break;
case WLAN_EID_BSS_COEX_2040:
- bss_entry->bcn_bss_co_2040 = (u8 *) (current_ptr +
- sizeof(struct ieee_types_header));
+ bss_entry->bcn_bss_co_2040 = current_ptr +
+ sizeof(struct ieee_types_header);
bss_entry->bss_co_2040_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
case WLAN_EID_EXT_CAPABILITY:
- bss_entry->bcn_ext_cap = (u8 *) (current_ptr +
- sizeof(struct ieee_types_header));
+ bss_entry->bcn_ext_cap = current_ptr +
+ sizeof(struct ieee_types_header);
bss_entry->ext_cap_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
@@ -1684,8 +1682,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
goto done;
}
if (element_id == WLAN_EID_DS_PARAMS) {
- channel = *(u8 *) (current_ptr +
- sizeof(struct ieee_types_header));
+ channel = *(current_ptr + sizeof(struct ieee_types_header));
break;
}
@@ -2020,12 +2017,11 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
if (curr_bss->bcn_bss_co_2040)
curr_bss->bcn_bss_co_2040 =
- (u8 *) (curr_bss->beacon_buf +
- curr_bss->bss_co_2040_offset);
+ (curr_bss->beacon_buf + curr_bss->bss_co_2040_offset);
if (curr_bss->bcn_ext_cap)
- curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf +
- curr_bss->ext_cap_offset);
+ curr_bss->bcn_ext_cap = curr_bss->beacon_buf +
+ curr_bss->ext_cap_offset;
}
/*
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 75eaa6f..225d4c7 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -843,8 +843,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_mac_reg_access *mac_reg;
cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN);
- mac_reg = (struct host_cmd_ds_mac_reg_access *) &cmd->
- params.mac_reg;
+ mac_reg = &cmd->params.mac_reg;
mac_reg->action = cpu_to_le16(cmd_action);
mac_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -856,8 +855,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_bbp_reg_access *bbp_reg;
cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN);
- bbp_reg = (struct host_cmd_ds_bbp_reg_access *)
- &cmd->params.bbp_reg;
+ bbp_reg = &cmd->params.bbp_reg;
bbp_reg->action = cpu_to_le16(cmd_action);
bbp_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -869,8 +867,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_rf_reg_access *rf_reg;
cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN);
- rf_reg = (struct host_cmd_ds_rf_reg_access *)
- &cmd->params.rf_reg;
+ rf_reg = &cmd->params.rf_reg;
rf_reg->action = cpu_to_le16(cmd_action);
rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
rf_reg->value = (u8) le32_to_cpu(reg_rw->value);
@@ -881,8 +878,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_pmic_reg_access *pmic_reg;
cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN);
- pmic_reg = (struct host_cmd_ds_pmic_reg_access *) &cmd->
- params.pmic_reg;
+ pmic_reg = &cmd->params.pmic_reg;
pmic_reg->action = cpu_to_le16(cmd_action);
pmic_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -894,8 +890,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_rf_reg_access *cau_reg;
cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN);
- cau_reg = (struct host_cmd_ds_rf_reg_access *)
- &cmd->params.rf_reg;
+ cau_reg = &cmd->params.rf_reg;
cau_reg->action = cpu_to_le16(cmd_action);
cau_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -906,7 +901,6 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
{
struct mwifiex_ds_read_eeprom *rd_eeprom = data_buf;
struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom =
- (struct host_cmd_ds_802_11_eeprom_access *)
&cmd->params.eeprom;
cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 71c9b5b..97715df 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -227,7 +227,7 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *stats)
{
struct host_cmd_ds_802_11_get_log *get_log =
- (struct host_cmd_ds_802_11_get_log *) &resp->params.get_log;
+ &resp->params.get_log;
if (stats) {
stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame);
@@ -282,7 +282,7 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
u32 i;
int ret = 0;
- tlv_buf = (u8 *) ((u8 *) rate_cfg) +
+ tlv_buf = ((u8 *)rate_cfg) +
sizeof(struct host_cmd_ds_tx_rate_cfg);
tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
@@ -730,39 +730,33 @@ static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
eeprom = data_buf;
switch (type) {
case HostCmd_CMD_MAC_REG_ACCESS:
- r.mac = (struct host_cmd_ds_mac_reg_access *)
- &resp->params.mac_reg;
+ r.mac = &resp->params.mac_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset));
reg_rw->value = r.mac->value;
break;
case HostCmd_CMD_BBP_REG_ACCESS:
- r.bbp = (struct host_cmd_ds_bbp_reg_access *)
- &resp->params.bbp_reg;
+ r.bbp = &resp->params.bbp_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset));
reg_rw->value = cpu_to_le32((u32) r.bbp->value);
break;
case HostCmd_CMD_RF_REG_ACCESS:
- r.rf = (struct host_cmd_ds_rf_reg_access *)
- &resp->params.rf_reg;
+ r.rf = &resp->params.rf_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
reg_rw->value = cpu_to_le32((u32) r.bbp->value);
break;
case HostCmd_CMD_PMIC_REG_ACCESS:
- r.pmic = (struct host_cmd_ds_pmic_reg_access *)
- &resp->params.pmic_reg;
+ r.pmic = &resp->params.pmic_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset));
reg_rw->value = cpu_to_le32((u32) r.pmic->value);
break;
case HostCmd_CMD_CAU_REG_ACCESS:
- r.rf = (struct host_cmd_ds_rf_reg_access *)
- &resp->params.rf_reg;
+ r.rf = &resp->params.rf_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
reg_rw->value = cpu_to_le32((u32) r.rf->value);
break;
case HostCmd_CMD_802_11_EEPROM_ACCESS:
- r.eeprom = (struct host_cmd_ds_802_11_eeprom_access *)
- &resp->params.eeprom;
+ r.eeprom = &resp->params.eeprom;
pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count);
if (le16_to_cpu(eeprom->byte_count) <
le16_to_cpu(r.eeprom->byte_count)) {
@@ -838,7 +832,7 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
struct mwifiex_ds_misc_subsc_evt *sub_event)
{
struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
- (struct host_cmd_ds_802_11_subsc_evt *)&resp->params.subsc_evt;
+ &resp->params.subsc_evt;
/* For every subscribe event command (Get/Set/Clear), FW reports the
* current set of subscribed events*/
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 11e731f..b8614a8 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -422,7 +422,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (len != -1) {
sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
- sinfo.assoc_req_ies = (u8 *)&event->data[len];
+ sinfo.assoc_req_ies = &event->data[len];
len = (u8 *)sinfo.assoc_req_ies -
(u8 *)&event->frame_control;
sinfo.assoc_req_ies_len =
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index fa8ce51..636daf2 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -905,7 +905,7 @@ int p54_read_eeprom(struct ieee80211_hw *dev)
while (eeprom_size) {
blocksize = min(eeprom_size, maxblocksize);
- ret = p54_download_eeprom(priv, (void *) (eeprom + offset),
+ ret = p54_download_eeprom(priv, eeprom + offset,
offset, blocksize);
if (unlikely(ret))
goto free;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 18e82b3..9ba8510 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -478,7 +478,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
memcpy(&body->longbow.curve_data,
- (void *) entry + sizeof(__le16),
+ entry + sizeof(__le16),
priv->curve_data->entry_size);
} else {
struct p54_scan_body *chan = &body->normal;
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 266d45b..799e148 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -455,7 +455,7 @@ islpci_eth_receive(islpci_private *priv)
"Error mapping DMA address\n");
/* free the skbuf structure before aborting */
- dev_kfree_skb_irq((struct sk_buff *) skb);
+ dev_kfree_skb_irq(skb);
skb = NULL;
break;
}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 86a738b..598ca1c 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1849,7 +1849,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
pr_debug("ray_cs: interrupt for *dev=%p\n", dev);
local = netdev_priv(dev);
- link = (struct pcmcia_device *)local->finder;
+ link = local->finder;
if (!pcmcia_dev_present(link)) {
pr_debug(
"ray_cs interrupt from device not present or suspended.\n");
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index f4c852c..58e1f7b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -907,7 +907,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
struct rtl_priv *rtlpriv = rtl_priv(hw);
__le16 fc = hdr->frame_control;
- u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
+ u8 *act = (u8 *)skb->data + MAC80211_3ADDR_LEN;
u8 category;
if (!ieee80211_is_action(fc))
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 6a2d72b..5b4b4d4 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -146,7 +146,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
}
rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
- (u8 *) key_content, us_config);
+ key_content, us_config);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "<===\n");
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 278e9f9..a18ad2a 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -680,7 +680,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->short_preamble = bss_conf->use_short_preamble;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
- (u8 *) (&mac->short_preamble));
+ &mac->short_preamble);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -693,7 +693,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->slot_time = RTL_SLOT_TIME_20;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *) (&mac->slot_time));
+ &mac->slot_time);
}
if (changed & BSS_CHANGED_HT) {
@@ -713,7 +713,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_unlock();
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
- (u8 *) (&mac->max_mss_density));
+ &mac->max_mss_density);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
&mac->current_ampdu_factor);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
@@ -801,7 +801,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
u8 mstatus = RT_MEDIA_CONNECT;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
- (u8 *) (&mstatus));
+ &mstatus);
ppsc->report_linked = true;
}
} else {
@@ -809,7 +809,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
u8 mstatus = RT_MEDIA_DISCONNECT;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
- (u8 *)(&mstatus));
+ &mstatus);
ppsc->report_linked = false;
}
}
@@ -836,7 +836,7 @@ static void rtl_op_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
mac->tsf = tsf;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, &bibss);
}
static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
@@ -845,7 +845,7 @@ static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp = 0;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, &tmp);
}
static void rtl_op_sta_notify(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 1f14380..8e2f9afb 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -352,7 +352,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
(u8 *)&efuse_utilized);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
- (u8 *)&efuse_usage);
+ &efuse_usage);
done:
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
kfree(efuse_word[i]);
@@ -409,7 +409,7 @@ void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
else if (type == 2)
efuse_shadow_read_2byte(hw, offset, (u16 *) value);
else if (type == 4)
- efuse_shadow_read_4byte(hw, offset, (u32 *) value);
+ efuse_shadow_read_4byte(hw, offset, value);
}
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 36bffbc..31138fd 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -756,10 +756,10 @@ done:
if (index == rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
HW_DESC_RXERO,
- (u8 *)&tmp_one);
+ &tmp_one);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
- (u8 *)&tmp_one);
+ &tmp_one);
index = (index + 1) % rtlpci->rxringcount;
}
@@ -934,7 +934,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
__skb_queue_tail(&ring->queue, pskb);
rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
- (u8 *)&temp_one);
+ &temp_one);
return;
}
@@ -1126,11 +1126,11 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
rxbuffersize);
rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
HW_DESC_RXOWN,
- (u8 *)&tmp_one);
+ &tmp_one);
}
rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
- HW_DESC_RXERO, (u8 *)&tmp_one);
+ HW_DESC_RXERO, &tmp_one);
}
return 0;
}
@@ -1263,7 +1263,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_desc((u8 *) entry,
false,
HW_DESC_RXOWN,
- (u8 *)&tmp_one);
+ &tmp_one);
}
rtlpci->rx_ring[rx_queue_idx].idx = 0;
}
@@ -1423,7 +1423,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
__skb_queue_tail(&ring->queue, skb);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
- HW_DESC_OWN, (u8 *)&temp_one);
+ HW_DESC_OWN, &temp_one);
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 5ae2664..13ad33e 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -333,10 +333,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *) (&rpwm_val));
+ &rpwm_val);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *) (&fw_pwrmode));
+ &fw_pwrmode);
fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -356,11 +356,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
(u8 *) (&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *) (&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_SET_RPWM,
- (u8 *) (&rpwm_val));
+ &rpwm_val);
} else {
/* Reset the power save related parameters. */
ppsc->dot11_psmode = EACTIVE;
@@ -446,7 +446,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = (void *) data;
+ struct ieee80211_hdr *hdr = data;
struct ieee80211_tim_ie *tim_ie;
u8 *tim;
u8 tim_len;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index f7f48c7..a45afda 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -656,9 +656,8 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *) (&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 692c8ef..44febfd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -168,7 +168,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *bufferPtr = (u8 *) buffer;
+ u8 *bufferPtr = buffer;
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size);
@@ -262,7 +262,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
return 1;
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
- pfwdata = (u8 *) rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
if (IS_FW_HEADER_EXIST(pfwheader)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 5c4d9bc..bd0da7e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -214,13 +214,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *) (&e_aci));
+ &e_aci);
}
break;
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool)*val;
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@@ -232,7 +232,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
@@ -257,7 +257,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -284,7 +284,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
else
p_regtoset = regtoset_normal;
- factor_toset = *((u8 *) val);
+ factor_toset = *(val);
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -316,17 +316,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *(val);
rtl92c_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
- (u8 *) (&e_aci));
+ (&e_aci));
break;
}
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *(val);
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -382,7 +382,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -396,13 +396,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:{
u8 rpwm_val;
@@ -411,31 +411,30 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
udelay(1);
if (rpwm_val & BIT(7)) {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- (*(u8 *) val));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
} else {
rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- ((*(u8 *) val) | BIT(7)));
+ *val | BIT(7));
}
break;
}
case HW_VAR_H2C_FW_PWRMODE:{
- u8 psmode = (*(u8 *) val);
+ u8 psmode = *val;
if ((psmode != FW_PS_ACTIVE_MODE) &&
(!IS_92C_SERIAL(rtlhal->version))) {
rtl92c_dm_rf_saving(hw, true);
}
- rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_pwrmode_cmd(hw, *val);
break;
}
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = *val;
u8 tmp_regcr, tmp_reg422;
bool recover = false;
@@ -472,7 +471,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr & ~(BIT(0))));
}
- rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_joinbss_report_cmd(hw, *val);
break;
}
@@ -486,7 +485,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = val[0];
if (btype_ibss)
_rtl92ce_stop_tx_beacon(hw);
@@ -1589,10 +1588,10 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag,
hwinfo);
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = *&hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
@@ -1939,7 +1938,7 @@ void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 3af874e..52166640 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -605,7 +605,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool defaultadapter = true;
struct ieee80211_sta *sta;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -806,7 +806,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 0c74d4f..4bbb711 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -381,11 +381,11 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n",
rtlefuse->eeprom_vid, rtlefuse->eeprom_did);
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version =
le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
rtlefuse->eeprom_oemid);
if (rtlhal->oem_id == RT_CID_DEFAULT) {
@@ -1660,7 +1660,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *)(&e_aci));
+ &e_aci);
} else {
u8 sifstime = 0;
u8 u1bAIFS;
@@ -1685,7 +1685,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool)*val;
reg_tmp = 0;
if (short_preamble)
reg_tmp |= 0x80;
@@ -1696,7 +1696,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
switch (rtlpriv->sec.pairwise_enc_algorithm) {
case NO_ENCRYPTION:
@@ -1729,7 +1729,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
density_to_set &= 0x1f;
mac->min_space_cfg &= 0x07;
mac->min_space_cfg |= (density_to_set << 3);
@@ -1747,7 +1747,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 index = 0;
p_regtoset = regtoset_normal;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -1774,7 +1774,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
u32 u4b_ac_param;
u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
@@ -1814,11 +1814,11 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
if (rtlusb->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_ACM_CTRL, (u8 *)(&e_aci));
+ HW_VAR_ACM_CTRL, &e_aci);
break;
}
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -1874,7 +1874,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -1891,39 +1891,38 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:{
u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
if (rpwm_val & BIT(7))
- rtl_write_byte(rtlpriv, REG_USB_HRPWM,
- (*(u8 *)val));
+ rtl_write_byte(rtlpriv, REG_USB_HRPWM, *val);
else
rtl_write_byte(rtlpriv, REG_USB_HRPWM,
- ((*(u8 *)val) | BIT(7)));
+ *val | BIT(7));
break;
}
case HW_VAR_H2C_FW_PWRMODE:{
- u8 psmode = (*(u8 *) val);
+ u8 psmode = *val;
if ((psmode != FW_PS_ACTIVE_MODE) &&
(!IS_92C_SERIAL(rtlhal->version)))
rtl92c_dm_rf_saving(hw, true);
- rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_pwrmode_cmd(hw, *val);
break;
}
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = *val;
u8 tmp_reg422;
bool recover = false;
@@ -1948,7 +1947,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
tmp_reg422 | BIT(6));
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
}
- rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_joinbss_report_cmd(hw, *val);
break;
}
case HW_VAR_AID:{
@@ -1961,7 +1960,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = val[0];
if (btype_ibss)
_rtl92cu_stop_tx_beacon(hw);
@@ -2184,7 +2183,7 @@ void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 21bc827..2e6eb35 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -668,7 +668,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_RATE_ID(pdesc, 7);
SET_TX_DESC_MACID(pdesc, 0);
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len);
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index a7d63a8..c0201ed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -696,7 +696,7 @@ static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- (u8 *) (&tmp));
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index f548a8d..895ae6c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -120,7 +120,7 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *bufferPtr = (u8 *) buffer;
+ u8 *bufferPtr = buffer;
u32 pagenums, remainSize;
u32 page, offset;
@@ -256,8 +256,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
return 1;
fwsize = rtlhal->fwsize;
- pfwheader = (u8 *) rtlhal->pfirmware;
- pfwdata = (u8 *) rtlhal->pfirmware;
+ pfwheader = rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader);
rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index b338d52..f4051f4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -235,12 +235,12 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *) (&e_aci));
+ &e_aci);
break;
}
case HW_VAR_ACK_PREAMBLE: {
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool) (*val);
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
@@ -252,7 +252,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
if (min_spacing_to_set < sec_min_space)
@@ -271,7 +271,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY: {
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -293,7 +293,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
regtoSet = 0x66626641;
else
regtoSet = 0xb972a841;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -316,15 +316,15 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM: {
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
rtl92d_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
- (u8 *) (&e_aci));
+ &e_aci);
break;
}
case HW_VAR_ACM_CTRL: {
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -376,7 +376,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlpci->receive_config = ((u32 *) (val))[0];
break;
case HW_VAR_RETRY_LIMIT: {
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -390,16 +390,16 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:
- rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (u8 *) (val));
+ rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, val);
break;
case HW_VAR_H2C_FW_PWRMODE:
break;
@@ -407,7 +407,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT: {
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = *val;
u8 tmp_regcr, tmp_reg422;
bool recover = false;
@@ -435,7 +435,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr & ~(BIT(0))));
}
- rtl92d_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl92d_set_fw_joinbss_report_cmd(hw, *val);
break;
}
case HW_VAR_AID: {
@@ -447,7 +447,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_CORRECT_TSF: {
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = val[0];
if (btype_ibss)
_rtl92de_stop_tx_beacon(hw);
@@ -1794,7 +1794,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
"RTL819X Not boot from eeprom, check it !!\n");
return;
}
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
_rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
/* VID, DID SE 0xA-D */
@@ -2115,7 +2115,7 @@ void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 1666ef7..f80690d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -560,7 +560,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct ieee80211_sta *sta = info->control.sta;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -761,11 +761,11 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
SET_TX_DESC_RATE_ID(pdesc, 7);
SET_TX_DESC_MACID(pdesc, 0);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 2e11580..465f581 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -146,7 +146,7 @@ static void _rtl92s_dm_check_edca_turbo(struct ieee80211_hw *hw)
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- (u8 *)(&tmp));
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index b141c35..4542e69 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -145,13 +145,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *)(&e_aci));
+ &e_aci);
}
break;
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool) (*val);
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@@ -163,7 +163,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *)val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
if (rtlpriv->sec.pairwise_enc_algorithm ==
NO_ENCRYPTION)
@@ -194,7 +194,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
@@ -216,7 +216,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
15, 15, 15, 15, 0};
u8 index = 0;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -248,17 +248,17 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
rtl92s_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
- (u8 *)(&e_aci));
+ &e_aci);
break;
}
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&(
mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -313,7 +313,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, RETRY_LIMIT,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -328,14 +328,14 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_EFUSE_USAGE: {
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
}
case HW_VAR_IO_CMD: {
break;
}
case HW_VAR_WPA_CONFIG: {
- rtl_write_byte(rtlpriv, REG_SECR, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECR, *val);
break;
}
case HW_VAR_SET_RPWM:{
@@ -1813,8 +1813,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
else
index = 2;
- tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_HT20_DIFF +
- index]) & 0xff;
+ tempval = hwinfo[EEPROM_TX_PWR_HT20_DIFF + index] & 0xff;
rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
((tempval >> 4) & 0xF);
@@ -1830,14 +1829,13 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
else
index = 1;
- tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index])
- & 0xff;
+ tempval = hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index] & 0xff;
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] =
(tempval & 0xF);
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
((tempval >> 4) & 0xF);
- tempval = (*(u8 *)&hwinfo[TX_PWR_SAFETY_CHK]);
+ tempval = hwinfo[TX_PWR_SAFETY_CHK];
rtlefuse->txpwr_safetyflag = (tempval & 0x01);
}
@@ -1876,7 +1874,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Read RF-indication and Tx Power gain
* index diff of legacy to HT OFDM rate. */
- tempval = (*(u8 *)&hwinfo[EEPROM_RFIND_POWERDIFF]) & 0xff;
+ tempval = hwinfo[EEPROM_RFIND_POWERDIFF] & 0xff;
rtlefuse->eeprom_txpowerdiff = tempval;
rtlefuse->legacy_httxpowerdiff =
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
@@ -1887,7 +1885,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Get TSSI value for each path. */
usvalue = *(u16 *)&hwinfo[EEPROM_TSSI_A];
rtlefuse->eeprom_tssi[RF90_PATH_A] = (u8)((usvalue & 0xff00) >> 8);
- usvalue = *(u8 *)&hwinfo[EEPROM_TSSI_B];
+ usvalue = hwinfo[EEPROM_TSSI_B];
rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff);
RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
@@ -1896,7 +1894,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Read antenna tx power offset of B/C/D to A from EEPROM */
/* and read ThermalMeter from EEPROM */
- tempval = *(u8 *)&hwinfo[EEPROM_THERMALMETER];
+ tempval = hwinfo[EEPROM_THERMALMETER];
rtlefuse->eeprom_thermalmeter = tempval;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
"thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
@@ -1906,20 +1904,20 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->tssi_13dbm = rtlefuse->eeprom_thermalmeter * 100;
/* Read CrystalCap from EEPROM */
- tempval = (*(u8 *)&hwinfo[EEPROM_CRYSTALCAP]) >> 4;
+ tempval = hwinfo[EEPROM_CRYSTALCAP] >> 4;
rtlefuse->eeprom_crystalcap = tempval;
/* CrystalCap, BIT(12)~15 */
rtlefuse->crystalcap = rtlefuse->eeprom_crystalcap;
/* Read IC Version && Channel Plan */
/* Version ID, Channel plan */
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->txpwr_fromeprom = true;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
"EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan);
/* Read Customer ID or Board Type!!! */
- tempval = *(u8 *)&hwinfo[EEPROM_BOARDTYPE];
+ tempval = hwinfo[EEPROM_BOARDTYPE];
/* Change RF type definition */
if (tempval == 0)
rtlphy->rf_type = RF_2T2R;
@@ -1941,7 +1939,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
}
}
rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x",
rtlefuse->eeprom_oemid);
@@ -2251,7 +2249,7 @@ void rtl92se_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
sifs_timer = 0x0e0e;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 812b585..36d1cb3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -599,7 +599,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct ieee80211_sta *sta = info->control.sta;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 reserved_macid = 0;
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 6a28aee..8038a50 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -912,8 +912,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
- struct wl1271_acx_mem_map *memmap =
- (struct wl1271_acx_mem_map *)wl->target_mem_map;
+ struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
u32 count, fw_counter;
u32 i;
int ret;
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 117c412..7ab9222 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -827,7 +827,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values,
static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value,
const zd_addr_t addr)
{
- return zd_ioread32v_locked(chip, value, (const zd_addr_t *)&addr, 1);
+ return zd_ioread32v_locked(chip, value, &addr, 1);
}
static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value,
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 99193b4..45e3bb2 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -274,7 +274,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
const zd_addr_t addr)
{
- return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
+ return zd_usb_ioread16v(usb, value, &addr, 1);
}
void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f4a6fca..682633bf 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1363,8 +1363,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
INVALID_PENDING_IDX);
}
- __skb_queue_tail(&netbk->tx_queue, skb);
-
netbk->pending_cons++;
request_gop = xen_netbk_get_requests(netbk, vif,
@@ -1376,6 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
}
gop = request_gop;
+ __skb_queue_tail(&netbk->tx_queue, skb);
+
vif->tx.req_cons = idx;
xen_netbk_check_rx_xenvif(vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2027afe..3089990 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1935,14 +1935,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
dev_dbg(&dev->dev, "%s\n", dev->nodename);
- unregister_netdev(info->netdev);
-
xennet_disconnect_backend(info);
- del_timer_sync(&info->rx_refill_timer);
-
xennet_sysfs_delif(info->netdev);
+ unregister_netdev(info->netdev);
+
+ del_timer_sync(&info->rx_refill_timer);
+
free_percpu(info->stats);
free_netdev(info->netdev);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 2574abd..8e6c25f 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -57,6 +57,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
const __be32 *paddr;
u32 addr;
int len;
+ bool is_c45;
/* A PHY must have a reg property in the range [0-31] */
paddr = of_get_property(child, "reg", &len);
@@ -79,11 +80,18 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
mdio->irq[addr] = PHY_POLL;
}
- phy = get_phy_device(mdio, addr);
+ is_c45 = of_device_is_compatible(child,
+ "ethernet-phy-ieee802.3-c45");
+ phy = get_phy_device(mdio, addr, is_c45);
+
if (!phy || IS_ERR(phy)) {
- dev_err(&mdio->dev, "error probing PHY at address %i\n",
- addr);
- continue;
+ phy = phy_device_create(mdio, addr, 0, false, NULL);
+ if (!phy || IS_ERR(phy)) {
+ dev_err(&mdio->dev,
+ "error creating PHY at address %i\n",
+ addr);
+ continue;
+ }
}
/* Associate the OF node with the device structure so it
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index da14432..efc4b7f 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -25,7 +25,7 @@ static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);
static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static struct perf_event **perf_events[NR_CPUS];
static int num_counters;
/*
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 447e834..77cb54a 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1744,6 +1744,11 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
+ /* Some devices mustn't be in D3 during system sleep */
+ if (target_state == PCI_D3hot &&
+ (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
+ return 0;
+
pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
error = pci_set_power_state(dev, target_state);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2a75216..194b243a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2929,6 +2929,32 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+/*
+ * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
+ * ASUS motherboards will cause memory corruption or a system crash
+ * if they are in D3 while the system is put into S3 sleep.
+ */
+static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
+{
+ const char *sys_info;
+ static const char good_Asus_board[] = "P8Z68-V";
+
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
+ return;
+ if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
+ return;
+ sys_info = dmi_get_system_info(DMI_BOARD_NAME);
+ if (sys_info && memcmp(sys_info, good_Asus_board,
+ sizeof(good_Asus_board) - 1) == 0)
+ return;
+
+ dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
+ device_set_wakeup_capable(&dev->dev, false);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
+
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index c3b331b..0cc053a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps);
list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
_i_ < _maps_node_->num_maps; \
- i++, _map_ = &_maps_node_->maps[_i_])
+ _i_++, _map_ = &_maps_node_->maps[_i_])
/**
* pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
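
The for_each_maps() fix above is a classic macro-hygiene bug: the loop advanced a plain `i` instead of the `_i_` parameter, which either fails to compile or silently increments an unrelated variable while the intended index never moves. A minimal userspace sketch of the failure mode and the corrected pattern, assuming nothing about the kernel headers:

#include <stdio.h>

/* Buggy variant: increments "i", not the macro parameter "_i_". */
#define BUGGY_FOR_EACH(_i_, _n_)  for ((_i_) = 0; (_i_) < (_n_); i++)
/* Fixed variant, mirroring the patch. */
#define FIXED_FOR_EACH(_i_, _n_)  for ((_i_) = 0; (_i_) < (_n_); (_i_)++)

int main(void)
{
	int idx, i = 0;

	/* With BUGGY_FOR_EACH this loop would spin on "idx" forever while
	 * incrementing the unrelated "i"; with no "i" in scope it would
	 * not even compile. */
	FIXED_FOR_EACH(idx, 3)
		printf("iteration %d (stray i is still %d)\n", idx, i);
	return 0;
}
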
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index f6e7c67..dd6d93a 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -27,16 +27,16 @@
#include "core.h"
#include "pinctrl-imx.h"
-#define IMX_PMX_DUMP(info, p, m, c, n) \
-{ \
- int i, j; \
- printk("Format: Pin Mux Config\n"); \
- for (i = 0; i < n; i++) { \
- j = p[i]; \
- printk("%s %d 0x%lx\n", \
- info->pins[j].name, \
- m[i], c[i]); \
- } \
+#define IMX_PMX_DUMP(info, p, m, c, n) \
+{ \
+ int i, j; \
+ printk(KERN_DEBUG "Format: Pin Mux Config\n"); \
+ for (i = 0; i < n; i++) { \
+ j = p[i]; \
+ printk(KERN_DEBUG "%s %d 0x%lx\n", \
+ info->pins[j].name, \
+ m[i], c[i]); \
+ } \
}
/* The bits in CONFIG cell defined in binding doc*/
@@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create mux map */
parent = of_get_parent(np);
- if (!parent)
+ if (!parent) {
+ kfree(new_map);
return -EINVAL;
+ }
new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
new_map[0].data.mux.function = parent->name;
new_map[0].data.mux.group = np->name;
@@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
}
dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
- new_map->data.mux.function, new_map->data.mux.group, map_num);
+ (*map)->data.mux.function, (*map)->data.mux.group, map_num);
return 0;
}
@@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
static void imx_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
- int i;
-
- for (i = 0; i < num_maps; i++)
- kfree(map);
+ kfree(map);
}
static struct pinctrl_ops imx_pctrl_ops = {
@@ -475,9 +474,8 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
grp->configs[j] = config & ~IMX_PAD_SION;
}
-#ifdef DEBUG
IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
-#endif
+
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
index 556e45a..4ba4636 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
/* Compose group name */
group = kzalloc(length, GFP_KERNEL);
- if (!group)
- return -ENOMEM;
+ if (!group) {
+ ret = -ENOMEM;
+ goto free;
+ }
snprintf(group, length, "%s.%d", np->name, reg);
new_map[i].data.mux.group = group;
i++;
@@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
if (!pconfig) {
ret = -ENOMEM;
- goto free;
+ goto free_group;
}
new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
@@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
return 0;
+free_group:
+ if (!purecfg)
+ kfree(group);
free:
kfree(new_map);
return ret;
@@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev,
return 0;
err:
+ platform_set_drvdata(pdev, NULL);
iounmap(d->base);
return ret;
}
@@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev)
{
struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
pinctrl_unregister(d->pctl);
iounmap(d->base);
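
The pinctrl-mxs changes above are all about unwinding allocations in the right order: `group` is now released on the `pconfig` failure path via the new `free_group` label (guarded by `!purecfg`, i.e. only when it was actually allocated), while the earlier failure still releases only `new_map`. A compact standalone sketch of that staged-cleanup idiom, with hypothetical allocation steps standing in for the driver's own:

#include <stdlib.h>

/* Illustrative only: the allocations are stand-ins, not driver calls. */
static int setup_sketch(int want_group)
{
	char *new_map, *group = NULL, *pconfig;
	int ret;

	new_map = malloc(64);
	if (!new_map)
		return -1;

	if (want_group) {
		group = malloc(32);
		if (!group) {
			ret = -1;
			goto free;		/* nothing beyond new_map yet */
		}
	}

	pconfig = malloc(16);
	if (!pconfig) {
		ret = -1;
		goto free_group;	/* undo the later allocation first */
	}

	free(pconfig);
	free(group);
	free(new_map);
	return 0;

free_group:
	if (want_group)		/* mirrors the "if (!purecfg)" guard above */
		free(group);
free:
	free(new_map);
	return ret;
}

int main(void)
{
	return setup_sketch(1);
}
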
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index b26395d..3e7e47d 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -673,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
* wakeup is anyhow controlled by the RIMSC and FIMSC registers.
*/
if (nmk_chip->sleepmode && on) {
- __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base,
+ __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
NMK_GPIO_SLPM_WAKEUP_ENABLE);
}
@@ -1246,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
ret = PTR_ERR(clk);
goto out_unmap;
}
+ clk_prepare(clk);
nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
if (!nmk_chip) {
@@ -1437,7 +1438,27 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
- /* Handle this special glitch on altfunction C */
+ /*
+ * If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
+ * we may pass through an undesired state. In this case we take
+ * some extra care.
+ *
+ * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+ * - Save SLPM registers (since we have a shadow register in the
+ * nmk_chip we're using that as backup)
+ * - Set SLPM=0 for the IOs you want to switch and others to 1
+ * - Configure the GPIO registers for the IOs that are being switched
+ * - Set IOFORCE=1
+ * - Modify the AFLSA/B registers for the IOs that are being switched
+ * - Set IOFORCE=0
+ * - Restore SLPM registers
+ * - Any spurious wake up event during switch sequence to be ignored
+ * and cleared
+ *
+ * We REALLY need to save ALL slpm registers, because the external
+ * IOFORCE will switch *all* ports to their sleepmode setting so as
+ * to avoid glitches. (Not just one port!)
+ */
glitch = (g->altsetting == NMK_GPIO_ALT_C);
if (glitch) {
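
The comment added above is essentially an algorithm in prose. Below is a schematic C rendering of that sequence; every helper is a hypothetical no-op stub, not the driver's real API, so it only illustrates the ordering the comment prescribes:

/* Hypothetical stubs so the sketch compiles; the real driver programs the
 * SLPM, AFSLA/AFSLB and IOFORCE hardware registers instead. */
static void slpm_save_all(void) { }
static void slpm_set(int switching_pins, int other_pins) { (void)switching_pins; (void)other_pins; }
static void gpio_configure_switching_pins(void) { }
static void ioforce_set(int on) { (void)on; }
static void afsl_update_switching_pins(void) { }
static void slpm_restore_all(void) { }
static void clear_spurious_wakeups(void) { }

/* The safe GPIO <-> Alternate-C switch sequence from the comment above. */
static void altfunc_c_switch_sketch(void)
{
	slpm_save_all();			/* backup (shadow registers)   */
	slpm_set(0, 1);				/* switching pins 0, others 1  */
	gpio_configure_switching_pins();
	ioforce_set(1);
	afsl_update_switching_pins();		/* flip AFSLA/AFSLB            */
	ioforce_set(0);
	slpm_restore_all();
	clear_spurious_wakeups();		/* ignore/clear wake events    */
}

int main(void) { altfunc_c_switch_sketch(); return 0; }
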
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index ba15b1a..e9f8e7d 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1184,7 +1184,7 @@ out_no_gpio_remap:
return ret;
}
-static const struct of_device_id pinmux_ids[] = {
+static const struct of_device_id pinmux_ids[] __devinitconst = {
{ .compatible = "sirf,prima2-gpio-pinmux" },
{}
};
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 5ae50aa..b3f6b28 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* Inspired from:
* - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 9155783..d950eb7 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
* Driver header file for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index fff168b..d6cca8c 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr1310 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -2192,7 +2192,7 @@ static void __exit spear1310_pinctrl_exit(void)
}
module_exit(spear1310_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index a8ab2a6..a0eb057 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr1340 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -1983,7 +1983,7 @@ static void __exit spear1340_pinctrl_exit(void)
}
module_exit(spear1340_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index 9c82a35..4dfc284 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr300 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -702,7 +702,7 @@ static void __exit spear300_pinctrl_exit(void)
}
module_exit(spear300_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 1a97076..9688369 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr310 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -425,7 +425,7 @@ static void __exit spear310_pinctrl_exit(void)
}
module_exit(spear310_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index de726e6..020b1e0 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr320 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -3462,7 +3462,7 @@ static void __exit spear320_pinctrl_exit(void)
}
module_exit(spear320_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 91c883b..0242378 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 5d5fdd8..31f4434 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
* Header file for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 639db4d..2fd9d36 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -5,7 +5,7 @@
*
* (C) 2009 - Peter Feuerer peter (a) piie.net
* http://piie.net
- * 2009 Borislav Petkov <petkovbb@gmail.com>
+ * 2009 Borislav Petkov bp (a) alien8.de
*
* Inspired by and many thanks to:
* o acerfand - Rachel Greenham
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index e1b8c54..a739f5c 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -794,17 +794,17 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
}
static struct of_regulator_match ab8500_regulator_matches[] = {
- { .name = "LDO-AUX1", .driver_data = (void *) AB8500_LDO_AUX1, },
- { .name = "LDO-AUX2", .driver_data = (void *) AB8500_LDO_AUX2, },
- { .name = "LDO-AUX3", .driver_data = (void *) AB8500_LDO_AUX3, },
- { .name = "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, },
- { .name = "LDO-TVOUT", .driver_data = (void *) AB8500_LDO_TVOUT, },
- { .name = "LDO-USB", .driver_data = (void *) AB8500_LDO_USB, },
- { .name = "LDO-AUDIO", .driver_data = (void *) AB8500_LDO_AUDIO, },
- { .name = "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
- { .name = "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
- { .name = "LDO-DMIC", .driver_data = (void *) AB8500_LDO_DMIC, },
- { .name = "LDO-ANA", .driver_data = (void *) AB8500_LDO_ANA, },
+ { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, },
+ { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, },
+ { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8500_LDO_AUX3, },
+ { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
+ { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8500_LDO_TVOUT, },
+ { .name = "ab8500_ldo_usb", .driver_data = (void *) AB8500_LDO_USB, },
+ { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8500_LDO_AUDIO, },
+ { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
+ { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
+ { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8500_LDO_DMIC, },
+ { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, },
};
static __devinit int
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 3660bac..e82e7ea 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -224,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = {
.of_match_table = of_anatop_regulator_match_tbl,
},
.probe = anatop_regulator_probe,
- .remove = anatop_regulator_remove,
+ .remove = __devexit_p(anatop_regulator_remove),
};
static int __init anatop_regulator_init(void)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7584a74..09a737c 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
return -EINVAL;
}
+ if (min_uV < rdev->desc->min_uV)
+ min_uV = rdev->desc->min_uV;
+
ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
if (ret < 0)
return ret;
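
The clamp added above keeps the numerator of the selector computation non-negative when a consumer requests less than the regulator's minimum voltage. A standalone sketch of the arithmetic, where `desc_min_uV` and `uV_step` stand in for the `rdev->desc` fields (illustrative values, not kernel API):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static int map_voltage_linear(int min_uV, int desc_min_uV, int uV_step)
{
	if (min_uV < desc_min_uV)	/* the clamp added by the patch */
		min_uV = desc_min_uV;
	return DIV_ROUND_UP(min_uV - desc_min_uV, uV_step);
}

int main(void)
{
	/* a regulator starting at 1.10 V in 12.5 mV steps, asked for >= 1.15 V */
	printf("%d\n", map_voltage_linear(1150000, 1100000, 12500));	/* 4 */
	/* a request below the range no longer produces a negative index */
	printf("%d\n", map_voltage_linear(1000000, 1100000, 12500));	/* 0 */
	return 0;
}
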
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 968f97f..9dbb491 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -452,26 +452,26 @@ static __devinit int db8500_regulator_register(struct platform_device *pdev,
}
static struct of_regulator_match db8500_regulator_matches[] = {
- { .name = "db8500-vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
- { .name = "db8500-varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
- { .name = "db8500-vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
- { .name = "db8500-vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
- { .name = "db8500-vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
- { .name = "db8500-vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
- { .name = "db8500-vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
- { .name = "db8500-vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
- { .name = "db8500-sva-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
- { .name = "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
- { .name = "db8500-sva-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
- { .name = "db8500-sia-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
- { .name = "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
- { .name = "db8500-sia-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
- { .name = "db8500-sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
- { .name = "db8500-b2r2-mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
- { .name = "db8500-esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
- { .name = "db8500-esram12-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
- { .name = "db8500-esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
- { .name = "db8500-esram34-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
+ { .name = "db8500_vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
+ { .name = "db8500_varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
+ { .name = "db8500_vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
+ { .name = "db8500_vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
+ { .name = "db8500_vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
+ { .name = "db8500_vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
+ { .name = "db8500_vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
+ { .name = "db8500_vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
+ { .name = "db8500_sva_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
+ { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
+ { .name = "db8500_sva_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
+ { .name = "db8500_sia_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
+ { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
+ { .name = "db8500_sia_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
+ { .name = "db8500_sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
+ { .name = "db8500_b2r2_mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
+ { .name = "db8500_esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
+ { .name = "db8500_esram12_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
+ { .name = "db8500_esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
+ { .name = "db8500_esram34_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
};
static __devinit int
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 9997d7a..242851a 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
}
static int gpio_regulator_set_value(struct regulator_dev *dev,
- int min, int max)
+ int min, int max, unsigned *selector)
{
struct gpio_regulator_data *data = rdev_get_drvdata(dev);
- int ptr, target, state, best_val = INT_MAX;
+ int ptr, target = 0, state, best_val = INT_MAX;
for (ptr = 0; ptr < data->nr_states; ptr++)
if (data->states[ptr].value < best_val &&
data->states[ptr].value >= min &&
- data->states[ptr].value <= max)
+ data->states[ptr].value <= max) {
target = data->states[ptr].gpios;
+ best_val = data->states[ptr].value;
+ if (selector)
+ *selector = ptr;
+ }
if (best_val == INT_MAX)
return -EINVAL;
@@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
int min_uV, int max_uV,
unsigned *selector)
{
- return gpio_regulator_set_value(dev, min_uV, max_uV);
+ return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
}
static int gpio_regulator_list_voltage(struct regulator_dev *dev,
@@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,
static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
int min_uA, int max_uA)
{
- return gpio_regulator_set_value(dev, min_uA, max_uA);
+ return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
}
static struct regulator_ops gpio_regulator_voltage_ops = {
@@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
cfg.dev = &pdev->dev;
cfg.init_data = config->init_data;
- cfg.driver_data = &drvdata;
+ cfg.driver_data = drvdata;
drvdata->dev = regulator_register(&drvdata->desc, &cfg);
if (IS_ERR(drvdata->dev)) {
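
Besides wiring up `*selector`, the gpio-regulator hunk above fixes the selection loop itself: `best_val` was never updated, so the `best_val == INT_MAX` check rejected every request and `target` could be read uninitialized. A minimal standalone version of the corrected search, assuming a plain array of candidate values:

#include <limits.h>
#include <stdio.h>

/* Pick the lowest candidate inside [min, max]; returns its index, or -1
 * if none fits (mirroring the -EINVAL case above). */
static int pick_lowest_in_range(const int *vals, int n, int min, int max,
				unsigned *selector)
{
	int i, best_val = INT_MAX, best_idx = -1;

	for (i = 0; i < n; i++)
		if (vals[i] < best_val && vals[i] >= min && vals[i] <= max) {
			best_val = vals[i];	/* the update the old code forgot */
			best_idx = i;
			if (selector)
				*selector = i;
		}

	return best_idx;
}

int main(void)
{
	int states[] = { 1200000, 1800000, 1500000 };
	unsigned sel;
	int idx = pick_lowest_in_range(states, 3, 1400000, 1900000, &sel);

	printf("picked index %d (selector %u)\n", idx, sel);	/* index 2 */
	return 0;
}
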
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 1f4bb80..9d540cd 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
config.dev = &client->dev;
config.init_data = pdata->regulator;
config.driver_data = info;
+ config.regmap = info->regmap;
info->regulator = regulator_register(&dcdc_desc, &config);
if (IS_ERR(info->regulator)) {
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index c4435f6..795f75a 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -673,7 +673,9 @@ static __devinit int palmas_probe(struct platform_device *pdev)
pmic->desc[id].ops = &palmas_ops_smps10;
pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
pmic->desc[id].vsel_mask = SMPS10_VSEL;
- pmic->desc[id].enable_reg = PALMAS_SMPS10_STATUS;
+ pmic->desc[id].enable_reg =
+ PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+ PALMAS_SMPS10_STATUS);
pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
}
@@ -739,7 +741,8 @@ static __devinit int palmas_probe(struct platform_device *pdev)
pmic->desc[id].type = REGULATOR_VOLTAGE;
pmic->desc[id].owner = THIS_MODULE;
- pmic->desc[id].enable_reg = palmas_regs_info[id].ctrl_addr;
+ pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+ palmas_regs_info[id].ctrl_addr);
pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE;
if (pdata && pdata->reg_data)
@@ -775,9 +778,6 @@ static __devinit int palmas_probe(struct platform_device *pdev)
err_unregister_regulator:
while (--id >= 0)
regulator_unregister(pmic->rdev[id]);
- kfree(pmic->rdev);
- kfree(pmic->desc);
- kfree(pmic);
return ret;
}
@@ -788,10 +788,6 @@ static int __devexit palmas_remove(struct platform_device *pdev)
for (id = 0; id < PALMAS_NUM_REGS; id++)
regulator_unregister(pmic->rdev[id]);
-
- kfree(pmic->rdev);
- kfree(pmic->desc);
- kfree(pmic);
return 0;
}
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 290d6fc..9caadb4 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -451,7 +451,7 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
desc = reg_voltage_map[reg_id];
- if (old_sel < new_sel)
+ if ((old_sel < new_sel) && s5m8767->ramp_delay)
return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
s5m8767->ramp_delay * 1000);
return 0;
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index f841bd0..8f1be85 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -71,7 +71,7 @@
/* LDO_CTRL bitfields */
#define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id) ((ldo_id)*4)
-#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0x0F << ((ldo_id)*4))
+#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0x07 << ((ldo_id)*4))
/* Number of step-down converters available */
#define TPS65023_NUM_DCDC 3
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index b88b3df..1b299aa 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -482,7 +482,7 @@ static int get_voltage_sel(struct regulator_dev *rdev)
info = &supply_info[rdev_get_id(rdev)];
if (info->flags & FIXED_VOLTAGE)
- return info->fixed_voltage;
+ return 0;
ret = read_field(hw, &info->voltage);
if (ret < 0)
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 69425c4..de138e3 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -182,7 +182,7 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev)
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
- dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+ dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
return ret;
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 8ea7bcc..66324ee 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -247,7 +247,7 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
}
if (offset + filesz > len) {
- dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
offset + filesz, len);
ret = -EINVAL;
break;
@@ -934,7 +934,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
if (unmapped != entry->len) {
/* nothing much to do besides complaining */
- dev_err(dev, "failed to unmap %u/%u\n", entry->len,
+ dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
unmapped);
}
@@ -1020,7 +1020,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
ehdr = (struct elf32_hdr *)fw->data;
- dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);
+ dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
* if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1041,8 +1041,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/* look for the resource table */
table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
- if (!table)
+ if (!table) {
+ ret = -EINVAL;
goto clean_up;
+ }
/* handle fw resources which are required to boot rproc */
ret = rproc_handle_boot_rsc(rproc, table, tablesz);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7d5f56e..4267789 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev)
static u32 rtc_handler(void *context)
{
+ struct device *dev = context;
+
+ pm_wakeup_event(dev, 0);
acpi_clear_event(ACPI_EVENT_RTC);
acpi_disable_event(ACPI_EVENT_RTC, 0);
return ACPI_INTERRUPT_HANDLED;
}
-static inline void rtc_wake_setup(void)
+static inline void rtc_wake_setup(struct device *dev)
{
- acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+ acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
/*
* After the RTC handler is installed, the Fixed_RTC event should
* be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev)
if (acpi_disabled)
return;
- rtc_wake_setup();
+ rtc_wake_setup(dev);
acpi_rtc_info.wake_on = rtc_wake_on;
acpi_rtc_info.wake_off = rtc_wake_off;
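
The rtc-cmos change threads the struct device through the fixed-event handler registration as its opaque context so the handler can call pm_wakeup_event() on it. A tiny generic sketch of that context-pointer pattern; the register/fire plumbing here is invented for illustration and is not an ACPI API:

#include <stdio.h>

struct device { const char *name; };

typedef unsigned int (*handler_t)(void *context);

/* Invented stand-ins for an event framework taking (handler, context). */
static handler_t registered;
static void *registered_ctx;

static void register_handler(handler_t h, void *context)
{
	registered = h;
	registered_ctx = context;
}

static void fire_event(void)
{
	if (registered)
		registered(registered_ctx);
}

/* The handler recovers the typed pointer from the opaque context, as
 * rtc_handler() now does with its struct device. */
static unsigned int rtc_like_handler(void *context)
{
	struct device *dev = context;

	printf("wakeup event reported for %s\n", dev->name);
	return 0;
}

int main(void)
{
	struct device rtc = { "rtc-cmos" };

	register_handler(rtc_like_handler, &rtc);	/* context was NULL before the patch */
	fire_event();
	return 0;
}
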
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7be5e97..73ac63d 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2700,10 +2700,11 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour_noref(dst);
+ n = dst_neigh_lookup_skb(dst, skb);
if (n) {
cast_type = n->type;
rcu_read_unlock();
+ neigh_release(n);
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 0578fa0d..42969e8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -59,6 +59,7 @@
#include "57xx_hsi_bnx2fc.h"
#include "bnx2fc_debug.h"
#include "../../net/ethernet/broadcom/cnic_if.h"
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
@@ -84,6 +85,8 @@
#define BNX2FC_NUM_MAX_SESS 1024
#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
+#define BNX2FC_MAX_NPIV 256
+
#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
@@ -206,6 +209,7 @@ struct bnx2fc_hba {
struct fcoe_statistics_params *stats_buffer;
dma_addr_t stats_buf_dma;
struct completion stat_req_done;
+ struct fcoe_capabilities fcoe_cap;
/*destroy handling */
struct timer_list destroy_timer;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f52f668f..05fe662 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1326,6 +1326,7 @@ static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
{
struct bnx2fc_hba *hba;
+ struct fcoe_capabilities *fcoe_cap;
int rc;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
@@ -1361,6 +1362,21 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
goto cmgr_err;
}
+ fcoe_cap = &hba->fcoe_cap;
+
+ fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES <<
+ FCOE_IOS_PER_CONNECTION_SHIFT;
+ fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
+ FCOE_LOGINS_PER_PORT_SHIFT;
+ fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS <<
+ FCOE_NUMBER_OF_EXCHANGES_SHIFT;
+ fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
+ FCOE_NPIV_WWN_PER_PORT_SHIFT;
+ fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
+ FCOE_TARGETS_SUPPORTED_SHIFT;
+ fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS <<
+ FCOE_OUTSTANDING_COMMANDS_SHIFT;
+ fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
init_waitqueue_head(&hba->shutdown_wait);
init_waitqueue_head(&hba->destroy_wait);
@@ -1691,6 +1707,32 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
hba->pcidev = NULL;
}
+/**
+ * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
+ *
+ * @handle: transport handle pointing to adapter structure
+ */
+static int bnx2fc_ulp_get_stats(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct cnic_dev *cnic;
+ struct fcoe_stats_info *stats_addr;
+
+ if (!hba)
+ return -EINVAL;
+
+ cnic = hba->cnic;
+ stats_addr = &cnic->stats_addr->fcoe_stat;
+ if (!stats_addr)
+ return -EINVAL;
+
+ strncpy(stats_addr->version, BNX2FC_VERSION,
+ sizeof(stats_addr->version));
+ stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
+ stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
+
+ return 0;
+}
/**
@@ -1944,6 +1986,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
adapter_count++;
mutex_unlock(&bnx2fc_dev_lock);
+ dev->fcoe_cap = &hba->fcoe_cap;
clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
rc = dev->register_device(dev, CNIC_ULP_FCOE,
(void *) hba);
@@ -2643,4 +2686,5 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb = {
.cnic_stop = bnx2fc_ulp_stop,
.indicate_kcqes = bnx2fc_indicate_kcqe,
.indicate_netevent = bnx2fc_indicate_netevent,
+ .cnic_get_stats = bnx2fc_ulp_get_stats,
};
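The capability words filled in by bnx2fc_hba_create() above are plain shift-packed bitfields that the management firmware later decodes. Below is a minimal userspace sketch of the same pack/decode pattern; the shift values and field widths are hypothetical stand-ins, not the real FCOE_*_SHIFT constants.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: low 16 bits = IOs per connection,
 * high 16 bits = logins per port (the real FCOE_*_SHIFT values differ). */
#define IOS_PER_CONN_SHIFT	0
#define LOGINS_PER_PORT_SHIFT	16

int main(void)
{
	uint32_t capability1 = 0;

	capability1 |= 256u << IOS_PER_CONN_SHIFT;	/* pack field 1 */
	capability1 |= 1024u << LOGINS_PER_PORT_SHIFT;	/* pack field 2 */

	printf("ios=%u logins=%u\n",
	       (capability1 >> IOS_PER_CONN_SHIFT) & 0xffff,
	       (capability1 >> LOGINS_PER_PORT_SHIFT) & 0xffff);
	return 0;
}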
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index dc0a08e..f2db5fe 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -267,7 +267,13 @@ struct bnx2i_cmd_request {
* task statistics for write response
*/
struct bnx2i_write_resp_task_stat {
- u32 num_data_ins;
+#if defined(__BIG_ENDIAN)
+ u16 num_r2ts;
+ u16 num_data_outs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_data_outs;
+ u16 num_r2ts;
+#endif
};
/*
@@ -275,11 +281,11 @@ struct bnx2i_write_resp_task_stat {
*/
struct bnx2i_read_resp_task_stat {
#if defined(__BIG_ENDIAN)
- u16 num_data_outs;
- u16 num_r2ts;
+ u16 reserved;
+ u16 num_data_ins;
#elif defined(__LITTLE_ENDIAN)
- u16 num_r2ts;
- u16 num_data_outs;
+ u16 num_data_ins;
+ u16 reserved;
#endif
};
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0c53c28..2e32614 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -44,6 +44,8 @@
#include "57xx_iscsi_hsi.h"
#include "57xx_iscsi_constants.h"
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
+
#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
#define BNX2I_MAX_ADAPTERS 8
@@ -126,6 +128,43 @@
#define REG_WR(__hba, offset, val) \
writel(val, __hba->regview + offset)
+#ifdef CONFIG_32BIT
+#define GET_STATS_64(__hba, dst, field) \
+ do { \
+ spin_lock_bh(&__hba->stat_lock); \
+ dst->field##_lo = __hba->stats.field##_lo; \
+ dst->field##_hi = __hba->stats.field##_hi; \
+ spin_unlock_bh(&__hba->stat_lock); \
+ } while (0)
+
+#define ADD_STATS_64(__hba, field, len) \
+ do { \
+ if (spin_trylock(&__hba->stat_lock)) { \
+ if (__hba->stats.field##_lo + len < \
+ __hba->stats.field##_lo) \
+ __hba->stats.field##_hi++; \
+ __hba->stats.field##_lo += len; \
+ spin_unlock(&__hba->stat_lock); \
+ } \
+ } while (0)
+
+#else
+#define GET_STATS_64(__hba, dst, field) \
+ do { \
+ u64 val, *out; \
+ \
+ val = __hba->bnx2i_stats.field; \
+ out = (u64 *)&__hba->stats.field##_lo; \
+ *out = cpu_to_le64(val); \
+ out = (u64 *)&dst->field##_lo; \
+ *out = cpu_to_le64(val); \
+ } while (0)
+
+#define ADD_STATS_64(__hba, field, len) \
+ do { \
+ __hba->bnx2i_stats.field += len; \
+ } while (0)
+#endif
/**
* struct generic_pdu_resc - login pdu resource structure
@@ -288,6 +327,15 @@ struct iscsi_cid_queue {
struct bnx2i_conn **conn_cid_tbl;
};
+
+struct bnx2i_stats_info {
+ u64 rx_pdus;
+ u64 rx_bytes;
+ u64 tx_pdus;
+ u64 tx_bytes;
+};
+
+
/**
* struct bnx2i_hba - bnx2i adapter structure
*
@@ -341,6 +389,8 @@ struct iscsi_cid_queue {
* @ctx_ccell_tasks: captures number of ccells and tasks supported by
* currently offloaded connection, used to decode
* context memory
+ * @stat_lock: spin lock used by the statistic collector (32 bit)
+ * @stats: local iSCSI statistic collection place holder
*
* Adapter Data Structure
*/
@@ -426,6 +476,12 @@ struct bnx2i_hba {
u32 num_sess_opened;
u32 num_conn_opened;
unsigned int ctx_ccell_tasks;
+
+#ifdef CONFIG_32BIT
+ spinlock_t stat_lock;
+#endif
+ struct bnx2i_stats_info bnx2i_stats;
+ struct iscsi_stats_info stats;
};
@@ -749,6 +805,8 @@ extern void bnx2i_ulp_init(struct cnic_dev *dev);
extern void bnx2i_ulp_exit(struct cnic_dev *dev);
extern void bnx2i_start(void *handle);
extern void bnx2i_stop(void *handle);
+extern int bnx2i_get_stats(void *handle);
+
extern struct bnx2i_hba *get_adapter_list_head(void);
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
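The ADD_STATS_64() macro added above relies on the classic unsigned-overflow test to carry into the high word when only 32-bit stores are cheap. A standalone sketch of that carry idiom (plain C, no kernel dependencies, names invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* 64-bit counter kept as two 32-bit halves, as on CONFIG_32BIT builds. */
struct split_counter {
	uint32_t lo;
	uint32_t hi;
};

static void add_stats_64(struct split_counter *c, uint32_t len)
{
	/* Unsigned wrap-around: if lo + len ends up smaller than lo,
	 * the 32-bit addition overflowed, so carry into hi. */
	if (c->lo + len < c->lo)
		c->hi++;
	c->lo += len;
}

int main(void)
{
	struct split_counter c = { .lo = 0xfffffff0u, .hi = 0 };

	add_stats_64(&c, 0x20);
	printf("hi=%u lo=0x%08x\n", c.hi, c.lo);	/* prints hi=1 lo=0x00000010 */
	return 0;
}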
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index ece47e5..49e8b05 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1350,6 +1350,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
struct cqe *cqe)
{
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
struct bnx2i_cmd_response *resp_cqe;
struct bnx2i_cmd *bnx2i_cmd;
struct iscsi_task *task;
@@ -1367,16 +1368,26 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
conn->datain_pdus_cnt +=
- resp_cqe->task_stat.read_stat.num_data_outs;
+ resp_cqe->task_stat.read_stat.num_data_ins;
conn->rxdata_octets +=
bnx2i_cmd->req.total_data_transfer_length;
+ ADD_STATS_64(hba, rx_pdus,
+ resp_cqe->task_stat.read_stat.num_data_ins);
+ ADD_STATS_64(hba, rx_bytes,
+ bnx2i_cmd->req.total_data_transfer_length);
} else {
conn->dataout_pdus_cnt +=
- resp_cqe->task_stat.read_stat.num_data_outs;
+ resp_cqe->task_stat.write_stat.num_data_outs;
conn->r2t_pdus_cnt +=
- resp_cqe->task_stat.read_stat.num_r2ts;
+ resp_cqe->task_stat.write_stat.num_r2ts;
conn->txdata_octets +=
bnx2i_cmd->req.total_data_transfer_length;
+ ADD_STATS_64(hba, tx_pdus,
+ resp_cqe->task_stat.write_stat.num_data_outs);
+ ADD_STATS_64(hba, tx_bytes,
+ bnx2i_cmd->req.total_data_transfer_length);
+ ADD_STATS_64(hba, rx_pdus,
+ resp_cqe->task_stat.write_stat.num_r2ts);
}
bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
@@ -1961,6 +1972,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
{
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
struct qp_info *qp;
struct bnx2i_nop_in_msg *nopin;
int tgt_async_msg;
@@ -1973,7 +1985,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (!qp->cq_virt) {
printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
- bnx2i_conn->hba->netdev->name);
+ hba->netdev->name);
goto out;
}
while (1) {
@@ -1985,9 +1997,9 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "
- "NOP-In detected for suspended "
- "connection dev=%s!\n",
- bnx2i_conn->hba->netdev->name);
+ "NOP-In detected for suspended "
+ "connection dev=%s!\n",
+ hba->netdev->name);
bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
goto cqe_out;
}
@@ -2001,7 +2013,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
/* Run the kthread engine only for data cmds
All other cmds will be completed in this bh! */
bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
- break;
+ goto done;
case ISCSI_OP_LOGIN_RSP:
bnx2i_process_login_resp(session, bnx2i_conn,
qp->cq_cons_qe);
@@ -2044,11 +2056,15 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
nopin->op_code);
}
+
+ ADD_STATS_64(hba, rx_pdus, 1);
+ ADD_STATS_64(hba, rx_bytes, nopin->data_length);
+done:
if (!tgt_async_msg) {
if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
printk(KERN_ALERT "bnx2i (%s): no active cmd! "
"op 0x%x\n",
- bnx2i_conn->hba->netdev->name,
+ hba->netdev->name,
nopin->op_code);
else
atomic_dec(&bnx2i_conn->ep->num_active_cmds);
@@ -2692,6 +2708,7 @@ struct cnic_ulp_ops bnx2i_cnic_cb = {
.cm_remote_close = bnx2i_cm_remote_close,
.cm_remote_abort = bnx2i_cm_remote_abort,
.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+ .cnic_get_stats = bnx2i_get_stats,
.owner = THIS_MODULE
};
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 8b68167..b17637a 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -381,6 +381,46 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
/**
+ * bnx2i_get_stats - Retrieve various statistics from iSCSI offload
+ * @handle: bnx2i_hba
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * retrieve various iSCSI offload related statistics.
+ */
+int bnx2i_get_stats(void *handle)
+{
+ struct bnx2i_hba *hba = handle;
+ struct iscsi_stats_info *stats;
+
+ if (!hba)
+ return -EINVAL;
+
+ stats = (struct iscsi_stats_info *)hba->cnic->stats_addr;
+
+ if (!stats)
+ return -ENOMEM;
+
+ strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version));
+ memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN);
+
+ stats->max_frame_size = hba->netdev->mtu;
+ stats->txq_size = hba->max_sqes;
+ stats->rxq_size = hba->max_cqes;
+
+ stats->txq_avg_depth = 0;
+ stats->rxq_avg_depth = 0;
+
+ GET_STATS_64(hba, stats, rx_pdus);
+ GET_STATS_64(hba, stats, rx_bytes);
+
+ GET_STATS_64(hba, stats, tx_pdus);
+ GET_STATS_64(hba, stats, tx_bytes);
+
+ return 0;
+}
+
+
+/**
* bnx2i_percpu_thread_create - Create a receive thread for an
* online CPU
*
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f8d516b..b40ac01 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -874,6 +874,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
hba->conn_ctx_destroy_tmo = 2 * HZ;
}
+#ifdef CONFIG_32BIT
+ spin_lock_init(&hba->stat_lock);
+#endif
+ memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
+
if (iscsi_host_add(shost, &hba->pcidev->dev))
goto free_dump_mem;
return hba;
@@ -1181,12 +1186,18 @@ static int
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
struct bnx2i_cmd *cmd = task->dd_data;
memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
bnx2i_setup_cmd_wqe_template(cmd);
bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+
+ /* Tx PDU/data length count */
+ ADD_STATS_64(hba, tx_pdus, 1);
+ ADD_STATS_64(hba, tx_bytes, task->data_count);
+
if (task->data_count) {
memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
task->data_count);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 36739da..49692a1 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,8 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->saddr.sin_addr.s_addr = chba->ipv4addr;
csk->rss_qid = 0;
- csk->l2t = t3_l2t_get(t3dev, dst, ndev);
+ csk->l2t = t3_l2t_get(t3dev, dst, ndev,
+ &csk->daddr.sin_addr.s_addr);
if (!csk->l2t) {
pr_err("NO l2t available.\n");
return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a4a3bf..cc9a068 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1142,7 +1142,7 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
- n = dst_get_neighbour_noref(csk->dst);
+ n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
if (!n) {
pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
goto rel_resource;
@@ -1182,9 +1182,12 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
+ neigh_release(n);
return 0;
rel_resource:
+ if (n)
+ neigh_release(n);
if (skb)
__kfree_skb(skb);
return -EINVAL;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d9253db..b44c1cf 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -494,7 +494,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
goto err_out;
}
dst = &rt->dst;
- n = dst_get_neighbour_noref(dst);
+ n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
if (!n) {
err = -ENODEV;
goto rel_rt;
@@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
ndev->name);
err = -ENETUNREACH;
- goto rel_rt;
+ goto rel_neigh;
}
if (ndev->flags & IFF_LOOPBACK) {
@@ -521,7 +521,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
pr_info("dst %pI4, %s, NOT cxgbi device.\n",
&daddr->sin_addr.s_addr, ndev->name);
err = -ENETUNREACH;
- goto rel_rt;
+ goto rel_neigh;
}
log_debug(1 << CXGBI_DBG_SOCK,
"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
@@ -531,7 +531,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk = cxgbi_sock_create(cdev);
if (!csk) {
err = -ENOMEM;
- goto rel_rt;
+ goto rel_neigh;
}
csk->cdev = cdev;
csk->port_id = port;
@@ -541,9 +541,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk->daddr.sin_port = daddr->sin_port;
csk->daddr.sin_family = daddr->sin_family;
csk->saddr.sin_addr.s_addr = fl4.saddr;
+ neigh_release(n);
return csk;
+rel_neigh:
+ neigh_release(n);
+
rel_rt:
ip_rt_put(rt);
if (csk)
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6102ef2..9d46fcb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
static inline u8
_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
{
- return ioc->cpu_msix_table[smp_processor_id()];
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
}
/**
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 04f80eb..6986552 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -2477,11 +2476,9 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
}
cmd = qlt_ctio_to_cmd(vha, handle, ctio);
- if (cmd == NULL) {
- if (status != CTIO_SUCCESS)
- qlt_term_ctio_exchange(vha, ctio, NULL, status);
+ if (cmd == NULL)
return;
- }
+
se_cmd = &cmd->se_cmd;
tfo = se_cmd->se_tfo;
@@ -2727,10 +2724,12 @@ static void qlt_do_work(struct work_struct *work)
out_term:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
/*
- * cmd has not sent to target yet, so pass NULL as the second argument
+ * cmd has not been sent to the target yet, so pass NULL as the second
+ * argument to qlt_send_term_exchange() and free the memory here.
*/
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+ kmem_cache_free(qla_tgt_cmd_cachep, cmd);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 9ec19bc..9f9ef16 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -919,7 +919,6 @@ struct qla_tgt_srr_ctio {
#define QLA_TGT_XMIT_STATUS 2
#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
-#include <linux/version.h>
extern struct qla_tgt_data qla_target;
/*
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 436598f..6e64314 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -137,13 +137,15 @@ static char *tcm_qla2xxx_get_fabric_name(void)
*/
static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
{
- unsigned int i, j, value;
+ unsigned int i, j;
u8 wwn[8];
memset(wwn, 0, sizeof(wwn));
/* Validate and store the new name */
for (i = 0, j = 0; i < 16; i++) {
+ int value;
+
value = hex_to_bin(*ns++);
if (value >= 0)
j = (j << 4) | value;
@@ -652,8 +654,8 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
/*
* Called from qla_target.c:qlt_issue_task_mgmt()
*/
-int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
- uint8_t tmr_func, uint32_t tag)
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+ uint8_t tmr_func, uint32_t tag)
{
struct qla_tgt_sess *sess = mcmd->sess;
struct se_cmd *se_cmd = &mcmd->se_cmd;
@@ -762,65 +764,8 @@ static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
-static int tcm_qla2xxx_setup_nacl_from_rport(
- struct se_portal_group *se_tpg,
- struct se_node_acl *se_nacl,
- struct tcm_qla2xxx_lport *lport,
- struct tcm_qla2xxx_nacl *nacl,
- u64 rport_wwnn)
-{
- struct scsi_qla_host *vha = lport->qla_vha;
- struct Scsi_Host *sh = vha->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
- struct fc_rport *rport;
- unsigned long flags;
- void *node;
- int rc;
-
- /*
- * Scan the existing rports, and create a session for the
- * explict NodeACL is an matching rport->node_name already
- * exists.
- */
- spin_lock_irqsave(sh->host_lock, flags);
- list_for_each_entry(rport, &fc_host->rports, peers) {
- if (rport_wwnn != rport->node_name)
- continue;
-
- pr_debug("Located existing rport_wwpn and rport->node_name: 0x%016LX, port_id: 0x%04x\n",
- rport->node_name, rport->port_id);
- nacl->nport_id = rport->port_id;
-
- spin_unlock_irqrestore(sh->host_lock, flags);
-
- spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- node = btree_lookup32(&lport->lport_fcport_map, rport->port_id);
- if (node) {
- rc = btree_update32(&lport->lport_fcport_map,
- rport->port_id, se_nacl);
- } else {
- rc = btree_insert32(&lport->lport_fcport_map,
- rport->port_id, se_nacl,
- GFP_ATOMIC);
- }
- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
-
- if (rc) {
- pr_err("Unable to insert se_nacl into fcport_map");
- WARN_ON(rc > 0);
- return rc;
- }
-
- pr_debug("Inserted into fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%08x\n",
- se_nacl, rport_wwnn, nacl->nport_id);
-
- return 1;
- }
- spin_unlock_irqrestore(sh->host_lock, flags);
-
- return 0;
-}
-
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+ struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*
* Expected to be called with struct qla_hw_data->hardware_lock held
*/
@@ -842,11 +787,40 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
se_nacl, nacl->nport_wwnn, nacl->nport_id);
+ /*
+ * Now clear the se_nacl and session pointers from our HW lport lookup
+ * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
+ *
+ * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
+ * target_wait_for_sess_cmds() before the session waits for outstanding
+ * I/O to complete, to avoid a race between session shutdown execution
+ * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
+ */
+ tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
+}
+
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+ struct se_session *se_sess = container_of(kref,
+ struct se_session, sess_kref);
+
+ qlt_unreg_sess(se_sess->fabric_sess_ptr);
+}
+
+static void tcm_qla2xxx_put_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct qla_hw_data *ha = sess->vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
- target_put_session(sess->se_sess);
+ tcm_qla2xxx_put_session(sess->se_sess);
}
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
@@ -859,14 +833,10 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
struct config_group *group,
const char *name)
{
- struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
- struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
- struct tcm_qla2xxx_lport, lport_wwn);
struct se_node_acl *se_nacl, *se_nacl_new;
struct tcm_qla2xxx_nacl *nacl;
u64 wwnn;
u32 qla2xxx_nexus_depth;
- int rc;
if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
return ERR_PTR(-EINVAL);
@@ -893,16 +863,6 @@ static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
nacl->nport_wwnn = wwnn;
tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
- /*
- * Setup a se_nacl handle based on an a matching struct fc_rport setup
- * via drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
- */
- rc = tcm_qla2xxx_setup_nacl_from_rport(se_tpg, se_nacl, lport,
- nacl, wwnn);
- if (rc < 0) {
- tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
- return ERR_PTR(rc);
- }
return se_nacl;
}
@@ -1390,6 +1350,25 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}
+/*
+ * Should always be called with qla_hw_data->hardware_lock held.
+ */
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
+ struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ unsigned char be_sid[3];
+
+ be_sid[0] = sess->s_id.b.domain;
+ be_sid[1] = sess->s_id.b.area;
+ be_sid[2] = sess->s_id.b.al_pa;
+
+ tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+ sess, be_sid);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+ sess, sess->loop_id);
+}
+
static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
{
struct qla_tgt *tgt = sess->tgt;
@@ -1398,8 +1377,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
struct se_node_acl *se_nacl;
struct tcm_qla2xxx_lport *lport;
struct tcm_qla2xxx_nacl *nacl;
- unsigned char be_sid[3];
- unsigned long flags;
BUG_ON(in_interrupt());
@@ -1419,21 +1396,6 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
return;
}
target_wait_for_sess_cmds(se_sess, 0);
- /*
- * And now clear the se_nacl and session pointers from our HW lport
- * mappings for fabric S_ID and LOOP_ID.
- */
- memset(&be_sid, 0, 3);
- be_sid[0] = sess->s_id.b.domain;
- be_sid[1] = sess->s_id.b.area;
- be_sid[2] = sess->s_id.b.al_pa;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
- sess, be_sid);
- tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
- sess, sess->loop_id);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
transport_deregister_session_configfs(sess->se_sess);
transport_deregister_session(sess->se_sess);
@@ -1731,6 +1693,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
.new_cmd_map = NULL,
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
+ .put_session = tcm_qla2xxx_put_session,
.shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
@@ -1779,6 +1742,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
.release_cmd = tcm_qla2xxx_release_cmd,
+ .put_session = tcm_qla2xxx_put_session,
.shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 61c82a3..bbbc9c91 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level;
EXPORT_SYMBOL(scsi_logging_level);
#endif
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD)
-/* sd and scsi_pm need to coordinate flushing async actions */
+/* sd, scsi core and power management need to coordinate flushing async actions */
LIST_HEAD(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);
-#endif
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index c77628a..8818dd6 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -486,6 +486,10 @@ void
scsi_netlink_init(void)
{
int error;
+ struct netlink_kernel_cfg cfg = {
+ .input = scsi_nl_rcv_msg,
+ .groups = SCSI_NL_GRP_CNT,
+ };
INIT_LIST_HEAD(&scsi_nl_drivers);
@@ -497,8 +501,7 @@ scsi_netlink_init(void)
}
scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
- SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL,
- THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!scsi_nl_sock) {
printk(KERN_ERR "%s: register of receive handler failed\n",
__func__);
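This hunk, like the iSCSI transport and gdm72xx changes further down, moves to the netlink_kernel_create() variant that takes a struct netlink_kernel_cfg instead of separate groups/input/mutex arguments. A hedged kernel-style sketch of the new registration shape follows; the unit (NETLINK_USERSOCK here), socket variable, and input handler names are placeholders.

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <net/sock.h>
#include <net/net_namespace.h>

static void my_nl_input(struct sk_buff *skb);	/* placeholder receive hook */

static struct sock *my_nl_sock;

static int my_nl_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 1,		/* multicast groups to expose */
		.input	= my_nl_input,	/* per-socket receive callback */
	};

	my_nl_sock = netlink_kernel_create(&init_net, NETLINK_USERSOCK,
					   THIS_MODULE, &cfg);
	return my_nl_sock ? 0 : -ENOBUFS;
}

static void my_nl_exit(void)
{
	netlink_kernel_release(my_nl_sock);
}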
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 1cf640e..6042954 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2936,7 +2936,10 @@ EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
static __init int iscsi_transport_init(void)
{
int err;
-
+ struct netlink_kernel_cfg cfg = {
+ .groups = 1,
+ .input = iscsi_if_rx,
+ };
printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
ISCSI_TRANSPORT_VERSION);
@@ -2966,8 +2969,8 @@ static __init int iscsi_transport_init(void)
if (err)
goto unregister_conn_class;
- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
- NULL, THIS_MODULE);
+ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI,
+ THIS_MODULE, &cfg);
if (!nls) {
err = -ENOBUFS;
goto unregister_session_class;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6f0a4c6..6f72b80 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1899,6 +1899,8 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
{
if (sdp->host->max_cmd_len < 16)
return 0;
+ if (sdp->try_rc_10_first)
+ return 0;
if (sdp->scsi_level > SCSI_SPC_2)
return 1;
if (scsi_device_protection(sdp))
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 46ef5fe..0c73dd4 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -801,7 +801,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
if (!cs) {
- cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -842,6 +842,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
cs = spi->controller_state;
list_del(&cs->node);
+ kfree(cs);
}
if (spi->chip_select < spi->master->num_chipselect) {
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 1c3d638..aeac1ca 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -30,6 +30,7 @@
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/errno.h>
+#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
@@ -981,6 +982,8 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
}
EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
+#if IS_ENABLED(CONFIG_USB)
+
static int comedi_old_usb_auto_config(struct usb_interface *intf,
struct comedi_driver *driver)
{
@@ -1043,3 +1046,5 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
+
+#endif
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 292af0f..87c3a07 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -88,13 +88,15 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
void *msg, int len))
{
struct sock *sock;
+ struct netlink_kernel_cfg cfg = {
+ .input = netlink_rcv,
+ };
#if !defined(DEFINE_MUTEX)
init_MUTEX(&netlink_mutex);
#endif
- sock = netlink_kernel_create(&init_net, unit, 0, netlink_rcv, NULL,
- THIS_MODULE);
+ sock = netlink_kernel_create(&init_net, unit, THIS_MODULE, &cfg);
if (sock)
rcv_cb = cb;
@@ -104,7 +106,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
void netlink_exit(struct sock *sock)
{
- sock_release(sock->sk_socket);
+ netlink_kernel_release(sock);
}
int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
@@ -127,8 +129,12 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
}
seq++;
- nlh = NLMSG_PUT(skb, 0, seq, type, len);
- memcpy(NLMSG_DATA(nlh), msg, len);
+ nlh = nlmsg_put(skb, 0, seq, type, len, 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+ memcpy(nlmsg_data(nlh), msg, len);
NETLINK_CB(skb).pid = 0;
NETLINK_CB(skb).dst_group = 0;
@@ -144,7 +150,5 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
}
ret = 0;
}
-
-nlmsg_failure:
return ret;
}
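The switch from the NLMSG_PUT() macro to nlmsg_put() above also removes the implicit nlmsg_failure goto label; failure becomes an explicit NULL check. A short sketch of that construction path, assuming a payload already sized by the caller and hypothetical function names:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/netlink.h>

/* Build one netlink message carrying 'len' payload bytes; sketch only. */
static struct sk_buff *my_nl_build(u16 type, u32 seq, const void *msg, int len)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(len, GFP_KERNEL);	/* room for header + payload */
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, 0, seq, type, len, 0);
	if (!nlh) {				/* no room: free and bail out */
		kfree_skb(skb);
		return NULL;
	}
	memcpy(nlmsg_data(nlh), msg, len);
	return skb;
}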
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 0338c7c..f03fbd3 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -29,8 +29,6 @@ Then fill in the following:
* info->driver_module:
Set to THIS_MODULE. Used to ensure correct ownership
of various resources allocate by the core.
- * info->num_interrupt_lines:
- Number of event triggering hardware lines the device has.
* info->event_attrs:
Attributes used to enable / disable hardware events.
* info->attrs:
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 2490dd2..8f1b3af 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -13,6 +13,7 @@ config AD7291
config AD7298
tristate "Analog Devices AD7298 ADC driver"
depends on SPI
+ select IIO_KFIFO_BUF if IIO_BUFFER
help
Say yes here to build support for Analog Devices AD7298
8 Channel ADC with temperature sensor.
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 10ab6dc..a13afff 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -235,7 +235,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
.indexed = 1, \
.channel = num, \
.address = num, \
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.scan_index = num, \
.scan_type = IIO_ST('s', 16, 16, 0), \
}
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 3295ea6..97ef670 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -129,6 +129,7 @@ static void send_space_homebrew(long length);
static struct lirc_serial hardware[] = {
[LIRC_HOMEBREW] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
.signal_pin = UART_MSR_DCD,
.signal_pin_change = UART_MSR_DDCD,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -145,6 +146,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_IRDEO] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = UART_MCR_OUT2,
@@ -156,6 +158,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_IRDEO_REMOTE] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
@@ -167,6 +170,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_ANIMAX] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
.signal_pin = UART_MSR_DCD,
.signal_pin_change = UART_MSR_DDCD,
.on = 0,
@@ -177,6 +181,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_IGOR] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -201,6 +206,7 @@ static struct lirc_serial hardware[] = {
* See also http://www.nslu2-linux.org for this device
*/
[LIRC_NSLU2] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_NSLU2].lock),
.signal_pin = UART_MSR_CTS,
.signal_pin_change = UART_MSR_DCTS,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
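The lirc_serial hunks above add static lock initializers so each hardware[] entry's spinlock is valid from the moment the array exists, without any runtime spin_lock_init() call. A minimal sketch of that idiom, with hypothetical structure and array names:

#include <linux/spinlock.h>

/* Hypothetical per-port state protected by its own lock. */
struct my_port {
	spinlock_t lock;
	unsigned int signal_pin;
};

static struct my_port my_ports[] = {
	[0] = {
		/* lock is usable even before any init code runs */
		.lock = __SPIN_LOCK_UNLOCKED(my_ports[0].lock),
		.signal_pin = 1,
	},
	[1] = {
		.lock = __SPIN_LOCK_UNLOCKED(my_ports[1].lock),
		.signal_pin = 2,
	},
};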
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 11acd4c..8c6ed3b 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -208,7 +208,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
*/
ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
if (ret) {
- dev_err(dev->dev, "could not map (paddr)!\n");
+ dev_err(dev->dev,
+ "could not map (paddr)! Skipping framebuffer alloc\n");
ret = -ENOMEM;
goto fail;
}
@@ -388,8 +389,11 @@ void omap_fbdev_free(struct drm_device *dev)
fbi = helper->fbdev;
- unregister_framebuffer(fbi);
- framebuffer_release(fbi);
+ /* only cleanup framebuffer if it is present */
+ if (fbi) {
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+ }
drm_fb_helper_fini(helper);
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index 4e7ef0e..d46764b 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
return oid;
}
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
/* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored)
}
static struct frontswap_ops zcache_frontswap_ops = {
- .put_page = zcache_frontswap_put_page,
- .get_page = zcache_frontswap_get_page,
+ .store = zcache_frontswap_store,
+ .load = zcache_frontswap_load,
.invalidate_page = zcache_frontswap_flush_page,
.invalidate_area = zcache_frontswap_flush_area,
.init = zcache_frontswap_init
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bd18e2..69f616c 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
/* - */
{USB_DEVICE(0x20F4, 0x646B)},
{USB_DEVICE(0x083A, 0xC512)},
+ {USB_DEVICE(0x25D4, 0x4CA1)},
+ {USB_DEVICE(0x25D4, 0x4CAB)},
/* RTL8191SU */
/* Realtek */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 2734dac..784c796 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1;
* Swizzling increases objects per swaptype, increasing tmem concurrency
* for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
* Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
+ * frontswap_load(), but has side-effects. Hence using 8.
*/
#define SWIZ_BITS 8
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
@@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
return oid;
}
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
/* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored)
}
static struct frontswap_ops zcache_frontswap_ops = {
- .put_page = zcache_frontswap_put_page,
- .get_page = zcache_frontswap_get_page,
+ .store = zcache_frontswap_store,
+ .load = zcache_frontswap_load,
.invalidate_page = zcache_frontswap_flush_page,
.invalidate_area = zcache_frontswap_flush_area,
.init = zcache_frontswap_init
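Both zcache and ramster above only rename their frontswap hooks to the new .store/.load field names; the registration shape itself is unchanged. A hedged sketch of a do-nothing backend wired up with the renamed ops, purely to show the structure (the handler names are placeholders):

#include <linux/frontswap.h>

static int my_fs_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* reject everything; sketch only */
}

static int my_fs_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* nothing was stored, so nothing to load */
}

static void my_fs_invalidate_page(unsigned type, pgoff_t offset) { }
static void my_fs_invalidate_area(unsigned type) { }
static void my_fs_init(unsigned type) { }

static struct frontswap_ops my_frontswap_ops = {
	.store		 = my_fs_store,		/* was .put_page */
	.load		 = my_fs_load,		/* was .get_page */
	.invalidate_page = my_fs_invalidate_page,
	.invalidate_area = my_fs_invalidate_area,
	.init		 = my_fs_init,
};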
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 37c6098..7e6136e 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -587,14 +587,14 @@ static void sbp_management_request_logout(
{
struct sbp_tport *tport = agent->tport;
struct sbp_tpg *tpg = tport->tpg;
- int login_id;
+ int id;
struct sbp_login_descriptor *login;
- login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
+ id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
- login = sbp_login_find_by_id(tpg, login_id);
+ login = sbp_login_find_by_id(tpg, id);
if (!login) {
- pr_warn("cannot find login: %d\n", login_id);
+ pr_warn("cannot find login: %d\n", id);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index e624b83..9179997 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
out:
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
- return 0;
+ if (!rc)
+ target_complete_cmd(cmd, GOOD);
+ return rc;
}
static inline int core_alua_state_nonoptimized(
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 686dba1..9f99d04 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice(
ret = PTR_ERR(dev_p);
goto fail;
}
-
- /* O_DIRECT too? */
- flags = O_RDWR | O_CREAT | O_LARGEFILE;
-
/*
- * If fd_buffered_io=1 has not been set explicitly (the default),
- * use O_SYNC to force FILEIO writes to disk.
+ * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+ * of pure timestamp updates.
*/
- if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
- flags |= O_SYNC;
+ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file)) {
@@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
}
}
-static void fd_emulate_write_fua(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- struct fd_dev *fd_dev = dev->dev_ptr;
- loff_t start = cmd->t_task_lba *
- dev->se_sub_dev->se_dev_attrib.block_size;
- loff_t end = start + cmd->data_length;
- int ret;
-
- pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
- cmd->t_task_lba, cmd->data_length);
-
- ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
- if (ret != 0)
- pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
-}
-
static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
u32 sgl_nents, enum dma_data_direction data_direction)
{
@@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
ret = fd_do_readv(cmd, sgl, sgl_nents);
} else {
ret = fd_do_writev(cmd, sgl, sgl_nents);
-
+ /*
+ * Perform implicit vfs_fsync_range() for fd_do_writev() ops
+ * for SCSI WRITEs with Forced Unit Access (FUA) set.
+ * Allow this to happen independent of WCE=0 setting.
+ */
if (ret > 0 &&
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)) {
- /*
- * We might need to be a bit smarter here
- * and return some sense data to let the initiator
- * know the FUA WRITE cache sync failed..?
- */
- fd_emulate_write_fua(cmd);
- }
+ struct fd_dev *fd_dev = dev->dev_ptr;
+ loff_t start = cmd->t_task_lba *
+ dev->se_sub_dev->se_dev_attrib.block_size;
+ loff_t end = start + cmd->data_length;
+ vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+ }
}
if (ret < 0) {
@@ -442,7 +422,6 @@ enum {
static match_table_t tokens = {
{Opt_fd_dev_name, "fd_dev_name=%s"},
{Opt_fd_dev_size, "fd_dev_size=%s"},
- {Opt_fd_buffered_io, "fd_buffered_io=%d"},
{Opt_err, NULL}
};
@@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params(
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, arg, token;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params(
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
- case Opt_fd_buffered_io:
- match_int(args, &arg);
- if (arg != 1) {
- pr_err("bogus fd_buffered_io=%d value\n", arg);
- ret = -EINVAL;
- goto out;
- }
-
- pr_debug("FILEIO: Using buffered I/O"
- " operations for struct fd_dev\n");
-
- fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
- break;
default:
break;
}
@@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params(
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
- bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
- fd_dev->fd_dev_name, fd_dev->fd_dev_size,
- (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
- "Buffered" : "Synchronous");
+ bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
+ fd_dev->fd_dev_name, fd_dev->fd_dev_size);
return bl;
}
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index fbd59ef..70ce7fd 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,7 +14,6 @@
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
-#define FDBD_USE_BUFFERED_IO 0x04
struct fd_dev {
u32 fbd_flags;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b05fdc0..634d0f3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -315,7 +315,7 @@ void transport_register_session(
}
EXPORT_SYMBOL(transport_register_session);
-static void target_release_session(struct kref *kref)
+void target_release_session(struct kref *kref)
{
struct se_session *se_sess = container_of(kref,
struct se_session, sess_kref);
@@ -332,6 +332,12 @@ EXPORT_SYMBOL(target_get_session);
void target_put_session(struct se_session *se_sess)
{
+ struct se_portal_group *tpg = se_sess->se_tpg;
+
+ if (tpg->se_tpg_tfo->put_session != NULL) {
+ tpg->se_tpg_tfo->put_session(se_sess);
+ return;
+ }
kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index d3d91da..944eaeb 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -214,24 +214,24 @@ static int xen_hvm_console_init(void)
/* already configured */
if (info->intf != NULL)
return 0;
-
+ /*
+ * If the toolstack (or the hypervisor) hasn't set these values, the
+ * default value is 0. Even though mfn = 0 and evtchn = 0 are
+ * theoretically correct values, in practice they never are and they
+ * mean that a legacy toolstack hasn't initialized the pv console correctly.
+ */
r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
- if (r < 0) {
- kfree(info);
- return -ENODEV;
- }
+ if (r < 0 || v == 0)
+ goto err;
info->evtchn = v;
- hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
- if (r < 0) {
- kfree(info);
- return -ENODEV;
- }
+ v = 0;
+ r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
+ if (r < 0 || v == 0)
+ goto err;
mfn = v;
info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
- if (info->intf == NULL) {
- kfree(info);
- return -ENODEV;
- }
+ if (info->intf == NULL)
+ goto err;
info->vtermno = HVC_COOKIE;
spin_lock(&xencons_lock);
@@ -239,6 +239,9 @@ static int xen_hvm_console_init(void)
spin_unlock(&xencons_lock);
return 0;
+err:
+ kfree(info);
+ return -ENODEV;
}
static int xen_pv_console_init(void)
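The hvc_xen hunk above folds three duplicated kfree()/return pairs into a single err: exit label, the usual kernel error-unwinding shape. A tiny self-contained sketch of that shape with hypothetical names and a placeholder setup step:

#include <linux/slab.h>
#include <linux/errno.h>

struct my_info {			/* hypothetical per-console state */
	int evtchn;
	int vtermno;
};

static int my_setup_step(struct my_info *info)	/* placeholder setup call */
{
	return info ? 0 : -ENODEV;
}

static int my_console_init(void)
{
	struct my_info *info;
	int r;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	r = my_setup_step(info);
	if (r < 0)
		goto err;
	r = my_setup_step(info);
	if (r < 0)
		goto err;
	return 0;

err:
	kfree(info);		/* one unwind point for every failure path */
	return -ENODEV;
}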
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 47d061b..6e1958a 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -3113,7 +3113,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
/**
* serial8250_register_8250_port - register a serial port
- * @port: serial port template
+ * @up: serial port template
*
* Configure the serial port specified by the request. If the
* port exists and is in use, it is hung up and unregistered
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4ad721f..c17923e 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -133,6 +133,10 @@ struct pl011_dmatx_data {
struct uart_amba_port {
struct uart_port port;
struct clk *clk;
+ /* Two optional pin states - default & sleep */
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_sleep;
const struct vendor_data *vendor;
unsigned int dmacr; /* dma control reg */
unsigned int im; /* interrupt mask */
@@ -1312,6 +1316,14 @@ static int pl011_startup(struct uart_port *port)
unsigned int cr;
int retval;
+ /* Optionally enable pins to be muxed in and configured */
+ if (!IS_ERR(uap->pins_default)) {
+ retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+ if (retval)
+ dev_err(port->dev,
+ "could not set default pins\n");
+ }
+
retval = clk_prepare(uap->clk);
if (retval)
goto out;
@@ -1420,6 +1432,7 @@ static void pl011_shutdown(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int cr;
+ int retval;
/*
* disable all interrupts
@@ -1462,6 +1475,14 @@ static void pl011_shutdown(struct uart_port *port)
*/
clk_disable(uap->clk);
clk_unprepare(uap->clk);
+ /* Optionally let pins go into sleep states */
+ if (!IS_ERR(uap->pins_sleep)) {
+ retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
+ if (retval)
+ dev_err(port->dev,
+ "could not set pins to sleep state\n");
+ }
+
if (uap->port.dev->platform_data) {
struct amba_pl011_data *plat;
@@ -1792,6 +1813,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
if (!uap)
return -ENODEV;
+ /* Allow pins to be muxed in and configured */
+ if (!IS_ERR(uap->pins_default)) {
+ ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+ if (ret)
+ dev_err(uap->port.dev,
+ "could not set default pins\n");
+ }
+
ret = clk_prepare(uap->clk);
if (ret)
return ret;
@@ -1844,7 +1873,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
- struct pinctrl *pinctrl;
void __iomem *base;
int i, ret;
@@ -1869,11 +1897,20 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
goto free;
}
- pinctrl = devm_pinctrl_get_select_default(&dev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
+ uap->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(uap->pinctrl)) {
+ ret = PTR_ERR(uap->pinctrl);
goto unmap;
}
+ uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(uap->pins_default))
+ dev_err(&dev->dev, "could not get default pinstate\n");
+
+ uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(uap->pins_sleep))
+ dev_dbg(&dev->dev, "could not get sleep pinstate\n");
uap->clk = clk_get(&dev->dev, NULL);
if (IS_ERR(uap->clk)) {
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 34bd345..6ae2a58 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -466,7 +466,7 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&up->port.lock, flags);
}
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL)
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/*
* Wait for transmitter & holding register to empty
*/
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4604153..1bd9163 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev,
return 0;
}
+static void sci_cleanup_single(struct sci_port *port)
+{
+ sci_free_gpios(port);
+
+ clk_put(port->iclk);
+ clk_put(port->fclk);
+
+ pm_runtime_disable(port->port.dev);
+}
+
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
static void serial_console_putchar(struct uart_port *port, int ch)
{
@@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev)
cpufreq_unregister_notifier(&port->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
- sci_free_gpios(port);
-
uart_remove_one_port(&sci_uart_driver, &port->port);
- clk_put(port->iclk);
- clk_put(port->fclk);
+ sci_cleanup_single(port);
- pm_runtime_disable(&dev->dev);
return 0;
}
@@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev,
index+1, SCI_NPORTS);
dev_notice(&dev->dev, "Consider bumping "
"CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
- return 0;
+ return -EINVAL;
}
ret = sci_init_single(dev, sciport, index, p);
if (ret)
return ret;
- return uart_add_one_port(&sci_uart_driver, &sciport->port);
+ ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+ if (ret) {
+ sci_cleanup_single(sciport);
+ return ret;
+ }
+
+ return 0;
}
static int __devinit sci_probe(struct platform_device *dev)
@@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev)
ret = sci_probe_single(dev, dev->id, p, sp);
if (ret)
- goto err_unreg;
+ return ret;
sp->freq_transition.notifier_call = sci_notifier;
ret = cpufreq_register_notifier(&sp->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
- if (unlikely(ret < 0))
- goto err_unreg;
+ if (unlikely(ret < 0)) {
+ sci_cleanup_single(sp);
+ return ret;
+ }
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
#endif
return 0;
-
-err_unreg:
- sci_remove(dev);
- return ret;
}
static int sci_suspend(struct device *dev)
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index c691eea..f5ed3d7 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/
obj-$(CONFIG_USB_SERIAL) += serial/
obj-$(CONFIG_USB) += misc/
-obj-$(CONFIG_USB) += phy/
+obj-$(CONFIG_USB_COMMON) += phy/
obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/
obj-$(CONFIG_USB_ATM) += atm/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f2a120e..36a2a0b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
usb_autopm_put_interface(acm->control);
+ /*
+ * Unthrottle device in case the TTY was closed while throttled.
+ */
+ spin_lock_irq(&acm->read_lock);
+ acm->throttled = 0;
+ acm->throttle_req = 0;
+ spin_unlock_irq(&acm->read_lock);
+
if (acm_submit_read_urbs(acm, GFP_KERNEL))
goto error_submit_read_urbs;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index ea8b304..8fd398d 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -55,6 +55,15 @@ static const struct usb_device_id wdm_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */
},
+ {
+ /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+ USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = HUAWEI_VENDOR_ID,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */
+ },
{ }
};
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 57ed9e4..622b4a4 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,15 +493,6 @@ static int hcd_pci_suspend_noirq(struct device *dev)
pci_save_state(pci_dev);
- /*
- * Some systems crash if an EHCI controller is in D3 during
- * a sleep transition. We have to leave such controllers in D0.
- */
- if (hcd->broken_pci_sleep) {
- dev_dbg(dev, "Staying in PCI D0\n");
- return retval;
- }
-
/* If the root hub is dead rather than suspended, disallow remote
* wakeup. usb_hc_died() should ensure that both hosts are marked as
* dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 04fb834..25a7422 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3379,7 +3379,7 @@ int usb_disable_lpm(struct usb_device *udev)
return 0;
udev->lpm_disable_count++;
- if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0))
+ if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
return 0;
/* If LPM is enabled, attempt to disable it. */
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b548cf1..bdd1c67 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1838,7 +1838,6 @@ free_interfaces:
intfc = cp->intf_cache[i];
intf->altsetting = intfc->altsetting;
intf->num_altsetting = intfc->num_altsetting;
- intf->intf_assoc = find_iad(dev, cp, i);
kref_get(&intfc->ref);
alt = usb_altnum_to_altsetting(intf, 0);
@@ -1851,6 +1850,8 @@ free_interfaces:
if (!alt)
alt = &intf->altsetting[0];
+ intf->intf_assoc =
+ find_iad(dev, cp, alt->desc.bInterfaceNumber);
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
intf->dev.parent = &dev->dev;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3df1a19..ec70df7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1091,7 +1091,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
if (r == req) {
/* wait until it is processed */
dwc3_stop_active_transfer(dwc, dep->number);
- goto out0;
+ goto out1;
}
dev_err(dwc->dev, "request %p was not queued to %s\n",
request, ep->name);
@@ -1099,6 +1099,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
goto out0;
}
+out1:
/* giveback the request */
dwc3_gadget_giveback(dep, req, -ECONNRESET);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index e23bf79..9a9bced 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -599,12 +599,6 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
spin_lock_irqsave(&ep->udc->lock, flags);
- if (ep->ep.desc) {
- spin_unlock_irqrestore(&ep->udc->lock, flags);
- DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
- return -EBUSY;
- }
-
ep->ep.desc = desc;
ep->ep.maxpacket = maxpacket;
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 51881f3..b09452d 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1596,7 +1596,7 @@ static int qe_ep_enable(struct usb_ep *_ep,
ep = container_of(_ep, struct qe_ep, ep);
/* catch various bogus parameters */
- if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] ||
+ if (!_ep || !desc || _ep->name == ep_name[0] ||
(desc->bDescriptorType != USB_DT_ENDPOINT))
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_qe_udc.h b/drivers/usb/gadget/fsl_qe_udc.h
index 4c07ca9..7026919 100644
--- a/drivers/usb/gadget/fsl_qe_udc.h
+++ b/drivers/usb/gadget/fsl_qe_udc.h
@@ -153,10 +153,10 @@ struct usb_ep_para{
#define USB_BUSMODE_DTB 0x02
/* Endpoint basic handle */
-#define ep_index(EP) ((EP)->desc->bEndpointAddress & 0xF)
+#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress & 0xF)
#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
#define ep_is_in(EP) ((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
- USB_DIR_IN) : ((EP)->desc->bEndpointAddress \
+ USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
& USB_DIR_IN) == USB_DIR_IN)
/* ep0 transfer state */
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 2831685..bc6f9bb 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -567,7 +567,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
ep = container_of(_ep, struct fsl_ep, ep);
/* catch various bogus parameters */
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| (desc->bDescriptorType != USB_DT_ENDPOINT))
return -EINVAL;
@@ -2575,7 +2575,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
/* for ep0: the desc defined here;
* for other eps, gadget layer called ep_enable with defined desc
*/
- udc_controller->eps[0].desc = &fsl_ep0_desc;
+ udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
/* setup the udc->eps[] for non-control endpoints and link
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 5cd7b7e..f61a967 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -568,10 +568,10 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
/*
* ### internal used help routines.
*/
-#define ep_index(EP) ((EP)->desc->bEndpointAddress&0xF)
+#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF)
#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
#define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
- USB_DIR_IN ):((EP)->desc->bEndpointAddress \
+ USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
& USB_DIR_IN)==USB_DIR_IN)
#define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \
&udc->eps[pipe])
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index b241e6c..3d28fb9 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -102,7 +102,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
dev = ep->dev;
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 262acfd..2ab0388 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -61,6 +61,7 @@
#include <mach/irqs.h>
#include <mach/board.h>
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index dbcd132..117a4bb 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -464,7 +464,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
ep = container_of(_ep, struct mv_ep, ep);
udc = ep->udc;
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 7ba3246..a460e8c 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -153,7 +153,7 @@ static int omap_ep_enable(struct usb_ep *_ep,
u16 maxp;
/* catch various bogus parameters */
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
|| ep->maxpacket < usb_endpoint_maxp(desc)) {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index d7c8cb3..f7ff9e8 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -218,7 +218,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
struct pxa25x_udc *dev;
ep = container_of (_ep, struct pxa25x_ep, ep);
- if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name
+ if (!_ep || !desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
|| ep->fifo_size < usb_endpoint_maxp (desc)) {
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 36c6836..236b271 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -760,7 +760,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
u32 ecr = 0;
hsep = our_ep(_ep);
- if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name
+ if (!_ep || !desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| hsep->bEndpointAddress != desc->bEndpointAddress
|| ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 3de71d3..f2e51f5 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1062,7 +1062,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
ep = to_s3c2410_ep(_ep);
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b100f5f..800be38 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd)
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+#if defined(CONFIG_PPC_PS3)
hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
+#endif
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a44294d..17cfb8a 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -43,6 +43,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/gpio.h>
+#include <linux/clk.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@@ -55,6 +56,15 @@
#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
+/* Errata i693 */
+static struct clk *utmi_p1_fck;
+static struct clk *utmi_p2_fck;
+static struct clk *xclk60mhsp1_ck;
+static struct clk *xclk60mhsp2_ck;
+static struct clk *usbhost_p1_fck;
+static struct clk *usbhost_p2_fck;
+static struct clk *init_60m_fclk;
+
/*-------------------------------------------------------------------------*/
static const struct hc_driver ehci_omap_hc_driver;
@@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)
return __raw_readl(base + reg);
}
+/* Erratum i693 workaround sequence */
+static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
+{
+ int ret = 0;
+
+ /* Switch to the internal 60 MHz clock */
+ ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
+ if (ret != 0)
+ ehci_err(ehci, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+
+ ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
+ if (ret != 0)
+ ehci_err(ehci, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+
+ clk_enable(usbhost_p1_fck);
+ clk_enable(usbhost_p2_fck);
+
+ /* Wait 1ms and switch back to the external clock */
+ mdelay(1);
+ ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
+ if (ret != 0)
+ ehci_err(ehci, "xclk60mhsp1_ck set parent"
+ "failed error:%d\n", ret);
+
+ ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
+ if (ret != 0)
+ ehci_err(ehci, "xclk60mhsp2_ck set parent"
+ "failed error:%d\n", ret);
+
+ clk_disable(usbhost_p1_fck);
+ clk_disable(usbhost_p2_fck);
+}
+
static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
{
struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
@@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
}
}
+static int omap_ehci_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ u32 __iomem *status_reg = &ehci->regs->port_status[
+ (wIndex & 0xff) - 1];
+ u32 temp;
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ temp &= ~PORT_WKCONN_E;
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+ omap_ehci_erratum_i693(ehci);
+
+ set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /* Handle the hub control events here */
+ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+done:
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return retval;
+}
+
static void disable_put_regulator(
struct ehci_hcd_omap_platform_data *pdata)
{
@@ -264,8 +353,76 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
/* root ports should always stay powered */
ehci_port_power(omap_ehci, 1);
+ /* get clocks */
+ utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+ if (IS_ERR(utmi_p1_fck)) {
+ ret = PTR_ERR(utmi_p1_fck);
+ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
+ goto err_add_hcd;
+ }
+
+ xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+ if (IS_ERR(xclk60mhsp1_ck)) {
+ ret = PTR_ERR(xclk60mhsp1_ck);
+ dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+ goto err_utmi_p1_fck;
+ }
+
+ utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+ if (IS_ERR(utmi_p2_fck)) {
+ ret = PTR_ERR(utmi_p2_fck);
+ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+ goto err_xclk60mhsp1_ck;
+ }
+
+ xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+ if (IS_ERR(xclk60mhsp2_ck)) {
+ ret = PTR_ERR(xclk60mhsp2_ck);
+ dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+ goto err_utmi_p2_fck;
+ }
+
+ usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+ if (IS_ERR(usbhost_p1_fck)) {
+ ret = PTR_ERR(usbhost_p1_fck);
+ dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+ goto err_xclk60mhsp2_ck;
+ }
+
+ usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+ if (IS_ERR(usbhost_p2_fck)) {
+ ret = PTR_ERR(usbhost_p2_fck);
+ dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+ goto err_usbhost_p1_fck;
+ }
+
+ init_60m_fclk = clk_get(dev, "init_60m_fclk");
+ if (IS_ERR(init_60m_fclk)) {
+ ret = PTR_ERR(init_60m_fclk);
+ dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+ goto err_usbhost_p2_fck;
+ }
+
return 0;
+err_usbhost_p2_fck:
+ clk_put(usbhost_p2_fck);
+
+err_usbhost_p1_fck:
+ clk_put(usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+ clk_put(xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+ clk_put(utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+ clk_put(xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+ clk_put(utmi_p1_fck);
+
err_add_hcd:
disable_put_regulator(pdata);
pm_runtime_put_sync(dev);
@@ -294,6 +451,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
disable_put_regulator(dev->platform_data);
iounmap(hcd->regs);
usb_put_hcd(hcd);
+
+ clk_put(utmi_p1_fck);
+ clk_put(utmi_p2_fck);
+ clk_put(xclk60mhsp1_ck);
+ clk_put(xclk60mhsp2_ck);
+ clk_put(usbhost_p1_fck);
+ clk_put(usbhost_p2_fck);
+ clk_put(init_60m_fclk);
+
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -364,7 +530,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
+ .hub_control = omap_ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
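The probe path added above acquires seven clocks and, when any clk_get() fails, releases the ones already obtained in reverse order through a ladder of goto labels. Below is a minimal standalone sketch of that unwind shape; acquire() and release() are stand-ins for clk_get() and clk_put(), and nothing here is the driver's actual code.

/*
 * Generic sketch of reverse-order goto unwinding: each acquisition adds one
 * label to the error ladder, so a failure releases exactly what came before.
 */
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *name)
{
        void *res = malloc(1);
        if (res)
                printf("acquired %s\n", name);
        return res;
}

static void release(void *res, const char *name)
{
        printf("released %s\n", name);
        free(res);
}

static int probe(void)
{
        void *a, *b, *c;
        int ret = -1;

        a = acquire("clk_a");
        if (!a)
                goto err_out;
        b = acquire("clk_b");
        if (!b)
                goto err_a;
        c = acquire("clk_c");
        if (!c)
                goto err_b;
        /* success: resources stay held for the device's lifetime (leaked here for brevity) */
        return 0;

err_b:
        release(b, "clk_b");
err_a:
        release(a, "clk_a");
err_out:
        return ret;
}

int main(void)
{
        return probe() ? 1 : 0;
}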
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bc94d7b..1234817 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
hcd->has_tt = 1;
tdi_reset(ehci);
}
- if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
- /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
- if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
- ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
- hcd->broken_pci_sleep = 1;
- device_set_wakeup_capable(&pdev->dev, false);
- }
- }
break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index ca819cd..e7cb392 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
- if (pdev->dev.platform_data != NULL)
- pdata = pdev->dev.platform_data;
+ pdata = pdev->dev.platform_data;
/* initialize hcd */
hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 9c2cc46..e9713d5 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
*
* Properly shutdown the hcd, call driver's shutdown routine.
*/
-static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
+static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
-
- return 0;
}
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 836772d..2f3619e 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
}
/* Carry out the final steps of resuming the controller device */
-static void ohci_finish_controller_resume(struct usb_hcd *hcd)
+static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int port;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ec4338e..77689bd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int slot_id)
{
- struct list_head *tt;
struct list_head *tt_list_head;
- struct list_head *tt_next;
- struct xhci_tt_bw_info *tt_info;
+ struct xhci_tt_bw_info *tt_info, *next;
+ bool slot_found = false;
/* If the device never made it past the Set Address stage,
* it may not have the real_port set correctly.
@@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
}
tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
- if (list_empty(tt_list_head))
- return;
-
- list_for_each(tt, tt_list_head) {
- tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
- if (tt_info->slot_id == slot_id)
+ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ /* Multi-TT hubs will have more than one entry */
+ if (tt_info->slot_id == slot_id) {
+ slot_found = true;
+ list_del(&tt_info->tt_list);
+ kfree(tt_info);
+ } else if (slot_found) {
break;
+ }
}
- /* Cautionary measure in case the hub was disconnected before we
- * stored the TT information.
- */
- if (tt_info->slot_id != slot_id)
- return;
-
- tt_next = tt->next;
- tt_info = list_entry(tt, struct xhci_tt_bw_info,
- tt_list);
- /* Multi-TT hubs will have more than one entry */
- do {
- list_del(tt);
- kfree(tt_info);
- tt = tt_next;
- if (list_empty(tt_list_head))
- break;
- tt_next = tt->next;
- tt_info = list_entry(tt, struct xhci_tt_bw_info,
- tt_list);
- } while (tt_info->slot_id == slot_id);
}
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
@@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
struct dev_info *dev_info, *next;
- struct list_head *tt_list_head;
- struct list_head *tt;
- struct list_head *endpoints;
- struct list_head *ep, *q;
- struct xhci_tt_bw_info *tt_info;
- struct xhci_interval_bw_table *bwt;
- struct xhci_virt_ep *virt_ep;
-
unsigned long flags;
int size;
- int i;
+ int i, j, num_ports;
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
spin_unlock_irqrestore(&xhci->lock, flags);
- bwt = &xhci->rh_bw->bw_table;
- for (i = 0; i < XHCI_MAX_INTERVAL; i++) {
- endpoints = &bwt->interval_bw[i].endpoints;
- list_for_each_safe(ep, q, endpoints) {
- virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list);
- list_del(&virt_ep->bw_endpoint_list);
- kfree(virt_ep);
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+ for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+ struct list_head *ep = &bwt->interval_bw[j].endpoints;
+ while (!list_empty(ep))
+ list_del_init(ep->next);
}
}
- tt_list_head = &xhci->rh_bw->tts;
- list_for_each_safe(tt, q, tt_list_head) {
- tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
- list_del(tt);
- kfree(tt_info);
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_tt_bw_info *tt, *n;
+ list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
+ list_del(&tt->tt_list);
+ kfree(tt);
+ }
}
xhci->num_usb2_ports = 0;
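Both xhci-mem.c hunks above replace open-coded list walks with the "safe" iteration idiom, which remembers the successor before an entry is unlinked and freed so traversal can continue. The kernel's list_head API is not usable in a standalone example, so the sketch below mimics the same delete-while-iterating pattern with a minimal singly linked list; it is illustrative only.

/*
 * Standalone sketch of delete-while-iterating using a saved "next" pointer,
 * the same idea as list_for_each_entry_safe() in the hunks above. The list
 * type is a stand-in, not the kernel's struct list_head.
 */
#include <stdio.h>
#include <stdlib.h>

struct tt_info {
        int slot_id;
        struct tt_info *next;
};

static void free_tts_for_slot(struct tt_info **head, int slot_id)
{
        struct tt_info **pp = head, *cur, *next;

        for (cur = *head; cur; cur = next) {
                next = cur->next;       /* remember the successor before freeing */
                if (cur->slot_id == slot_id) {
                        *pp = next;     /* unlink */
                        free(cur);
                } else {
                        pp = &cur->next;
                }
        }
}

static struct tt_info *push(struct tt_info *head, int slot_id)
{
        struct tt_info *n = malloc(sizeof(*n));   /* allocation check omitted for brevity */
        n->slot_id = slot_id;
        n->next = head;
        return n;
}

int main(void)
{
        struct tt_info *head = NULL;

        head = push(head, 1);
        head = push(head, 2);   /* multi-TT hub: several entries per slot */
        head = push(head, 2);
        free_tts_for_slot(&head, 2);

        for (struct tt_info *t = head; t; t = t->next)
                printf("remaining slot %d\n", t->slot_id);
        return 0;
}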
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index afdc73e..a979cd0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
command = xhci_readl(xhci, &xhci->op_regs->command);
command |= CMD_CSS;
xhci_writel(xhci, command, &xhci->op_regs->command);
- if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
- xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+ if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
+ xhci_warn(xhci, "WARN: xHC save state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
@@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
command |= CMD_CRS;
xhci_writel(xhci, command, &xhci->op_regs->command);
if (handshake(xhci, &xhci->op_regs->status,
- STS_RESTORE, 0, 10*100)) {
- xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
+ STS_RESTORE, 0, 10 * 1000)) {
+ xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
@@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
default:
dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
__func__);
- return -EINVAL;
+ return USB3_LPM_DISABLED;
}
if (sel <= max_sel_pel && pel <= max_sel_pel)
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 768b4b5..9d63ba4 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -34,6 +34,7 @@
#include <linux/dma-mapping.h>
#include <mach/cputype.h>
+#include <mach/hardware.h>
#include <asm/mach-types.h>
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 046c844..371baa0 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -15,7 +15,7 @@
*/
/* Integrated highspeed/otg PHY */
-#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_CTL_PADDR 0x01c40034
#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */
#define USBPHY_PHYCLKGD BIT(8)
#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */
@@ -27,7 +27,7 @@
#define USBPHY_OTGPDWN BIT(1)
#define USBPHY_PHYPDWN BIT(0)
-#define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48)
+#define DM355_DEEPSLEEP_PADDR 0x01c40048
#define DRVVBUS_FORCE BIT(2)
#define DRVVBUS_OVERRIDE BIT(1)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f42c29b..95918da 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1232,6 +1232,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
}
musb_ep->desc = NULL;
+ musb_ep->end_point.desc = NULL;
/* abort all pending DMA and requests */
nuke(musb_ep, -ESHUTDOWN);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ef8d744..e090c79 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -375,11 +375,21 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
*/
if (list_empty(&qh->hep->urb_list)) {
struct list_head *head;
+ struct dma_controller *dma = musb->dma_controller;
- if (is_in)
+ if (is_in) {
ep->rx_reinit = 1;
- else
+ if (ep->rx_channel) {
+ dma->channel_release(ep->rx_channel);
+ ep->rx_channel = NULL;
+ }
+ } else {
ep->tx_reinit = 1;
+ if (ep->tx_channel) {
+ dma->channel_release(ep->tx_channel);
+ ep->tx_channel = NULL;
+ }
+ }
/* Clobber old pointers to this qh */
musb_ep_set_qh(ep, is_in, NULL);
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index d2a9a8e..0eabb04 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -305,9 +305,8 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
regulator_enable(twl->usb3v3);
twl->asleep = 1;
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1);
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
- 0x10);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
status = USB_EVENT_ID;
otg->default_a = true;
twl->phy.state = OTG_STATE_A_IDLE;
@@ -316,12 +315,10 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
atomic_notifier_call_chain(&twl->phy.notifier, status,
otg->gadget);
} else {
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR,
- 0x10);
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
- 0x1);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
}
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_LATCH_CLR, status);
+ twl6030_writeb(twl, TWL_MODULE_USB, status, USB_ID_INT_LATCH_CLR);
return IRQ_HANDLED;
}
@@ -343,7 +340,7 @@ static int twl6030_enable_irq(struct usb_phy *x)
{
struct twl6030_usb *twl = phy_to_twl(x);
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 3cfabcb..e7cf84f 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -2,11 +2,11 @@
# Physical Layer USB driver configuration
#
comment "USB Physical Layer drivers"
- depends on USB
+ depends on USB || USB_GADGET
config USB_ISP1301
tristate "NXP ISP1301 USB transceiver support"
- depends on USB
+ depends on USB || USB_GADGET
depends on I2C
help
Say Y here to add support for the NXP ISP1301 USB transceiver driver.
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 1b19262..1e71079 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
+ { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
{ USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
@@ -92,6 +93,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -133,7 +135,13 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+ { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
+ { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+ { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+ { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
@@ -145,7 +153,11 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8c084ea..bc912e5 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f3c7c78..5661c7e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -784,6 +784,7 @@
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
+#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
/*
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 105a6d8..9b026bf 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -39,13 +39,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
-/* we want to look at all devices, as the vendor/product id can change
- * depending on the command line argument */
-static const struct usb_device_id generic_serial_ids[] = {
- {.driver_info = 42},
- {}
-};
-
/* All of the device info needed for the Generic Serial Converter */
struct usb_serial_driver usb_serial_generic_device = {
.driver = {
@@ -79,7 +72,8 @@ int usb_serial_generic_register(int _debug)
USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
/* register our generic driver with ourselves */
- retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids);
+ retval = usb_serial_register_drivers(serial_drivers,
+ "usbserial_generic", generic_device_ids);
#endif
return retval;
}
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index d0ec1aa..a71fa0a 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -309,13 +309,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
WDR_TIMEOUT);
- if (rc < 0)
- dev_err(&serial->dev->dev,
- "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+ kfree(buf);
+
dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
- kfree(buf);
- return rc;
+ if (rc < 0) {
+ dev_err(&serial->dev->dev,
+ "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+ return rc;
+ }
+ return 0;
} /* mct_u232_set_modem_ctrl */
static int mct_u232_get_modem_stat(struct usb_serial *serial,
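The mct_u232 rework above frees the transfer buffer unconditionally before acting on the return code, so the success and error paths can no longer diverge on cleanup. The sketch below follows the same shape, with malloc/free and a fake transfer standing in for the USB buffer and control request; it is not the driver's real code.

/*
 * Sketch of the "release the buffer first, then act on the status" shape:
 * the buffer is freed on every path, and only then is the result examined.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int do_control_transfer(unsigned char *buf, size_t len)
{
        memset(buf, 0xAA, len); /* pretend the hardware filled it in */
        return (int)len;        /* >= 0 means success */
}

static int set_modem_ctrl(unsigned int mcr)
{
        unsigned char *buf = malloc(1);
        int rc;

        if (!buf)
                return -1;

        buf[0] = (unsigned char)mcr;
        rc = do_control_transfer(buf, 1);
        free(buf);              /* released before the status check, on every path */

        if (rc < 0) {
                fprintf(stderr, "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
                return rc;
        }
        return 0;
}

int main(void)
{
        return set_modem_ctrl(0x03) ? 1 : 0;
}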
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 29160f8..57eca24 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -190,7 +190,7 @@
static int device_type;
-static const struct usb_device_id id_table[] __devinitconst = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1aae902..adf8ce7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -47,6 +47,7 @@
/* Function prototypes */
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id);
+static void option_release(struct usb_serial *serial);
static int option_send_setup(struct usb_serial_port *port);
static void option_instat_callback(struct urb *urb);
@@ -150,6 +151,7 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_E14AC 0x14AC
#define HUAWEI_PRODUCT_K3806 0x14AE
#define HUAWEI_PRODUCT_K4605 0x14C6
+#define HUAWEI_PRODUCT_K5005 0x14C8
#define HUAWEI_PRODUCT_K3770 0x14C9
#define HUAWEI_PRODUCT_K3771 0x14CA
#define HUAWEI_PRODUCT_K4510 0x14CB
@@ -234,6 +236,7 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_G1 0xA001
#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
+#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
@@ -425,7 +428,7 @@ static void option_instat_callback(struct urb *urb);
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
-/* YUGA products www.yuga-info.com*/
+/* YUGA products www.yuga-info.com gavin.kx@qq.com */
#define YUGA_VENDOR_ID 0x257A
#define YUGA_PRODUCT_CEM600 0x1601
#define YUGA_PRODUCT_CEM610 0x1602
@@ -442,6 +445,8 @@ static void option_instat_callback(struct urb *urb);
#define YUGA_PRODUCT_CEU516 0x160C
#define YUGA_PRODUCT_CEU528 0x160D
#define YUGA_PRODUCT_CEU526 0x160F
+#define YUGA_PRODUCT_CEU881 0x161F
+#define YUGA_PRODUCT_CEU882 0x162F
#define YUGA_PRODUCT_CWM600 0x2601
#define YUGA_PRODUCT_CWM610 0x2602
@@ -457,23 +462,26 @@ static void option_instat_callback(struct urb *urb);
#define YUGA_PRODUCT_CWU518 0x260B
#define YUGA_PRODUCT_CWU516 0x260C
#define YUGA_PRODUCT_CWU528 0x260D
+#define YUGA_PRODUCT_CWU581 0x260E
#define YUGA_PRODUCT_CWU526 0x260F
-
-#define YUGA_PRODUCT_CLM600 0x2601
-#define YUGA_PRODUCT_CLM610 0x2602
-#define YUGA_PRODUCT_CLM500 0x2603
-#define YUGA_PRODUCT_CLM510 0x2604
-#define YUGA_PRODUCT_CLM800 0x2605
-#define YUGA_PRODUCT_CLM900 0x2606
-
-#define YUGA_PRODUCT_CLU718 0x2607
-#define YUGA_PRODUCT_CLU716 0x2608
-#define YUGA_PRODUCT_CLU728 0x2609
-#define YUGA_PRODUCT_CLU726 0x260A
-#define YUGA_PRODUCT_CLU518 0x260B
-#define YUGA_PRODUCT_CLU516 0x260C
-#define YUGA_PRODUCT_CLU528 0x260D
-#define YUGA_PRODUCT_CLU526 0x260F
+#define YUGA_PRODUCT_CWU582 0x261F
+#define YUGA_PRODUCT_CWU583 0x262F
+
+#define YUGA_PRODUCT_CLM600 0x3601
+#define YUGA_PRODUCT_CLM610 0x3602
+#define YUGA_PRODUCT_CLM500 0x3603
+#define YUGA_PRODUCT_CLM510 0x3604
+#define YUGA_PRODUCT_CLM800 0x3605
+#define YUGA_PRODUCT_CLM900 0x3606
+
+#define YUGA_PRODUCT_CLU718 0x3607
+#define YUGA_PRODUCT_CLU716 0x3608
+#define YUGA_PRODUCT_CLU728 0x3609
+#define YUGA_PRODUCT_CLU726 0x360A
+#define YUGA_PRODUCT_CLU518 0x360B
+#define YUGA_PRODUCT_CLU516 0x360C
+#define YUGA_PRODUCT_CLU528 0x360D
+#define YUGA_PRODUCT_CLU526 0x360F
/* Viettel products */
#define VIETTEL_VENDOR_ID 0x2262
@@ -490,6 +498,10 @@ static void option_instat_callback(struct urb *urb);
/* MediaTek products */
#define MEDIATEK_VENDOR_ID 0x0e8d
+/* Cellient products */
+#define CELLIENT_VENDOR_ID 0x2692
+#define CELLIENT_PRODUCT_MEN200 0x9005
+
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -666,6 +678,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -722,6 +739,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1209,6 +1228,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
{ USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
@@ -1216,6 +1240,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1245,7 +1270,7 @@ static struct usb_serial_driver option_1port_device = {
.ioctl = usb_wwan_ioctl,
.attach = usb_wwan_startup,
.disconnect = usb_wwan_disconnect,
- .release = usb_wwan_release,
+ .release = option_release,
.read_int_callback = option_instat_callback,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
@@ -1259,35 +1284,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
static bool debug;
-/* per port private data */
-
-#define N_IN_URB 4
-#define N_OUT_URB 4
-#define IN_BUFLEN 4096
-#define OUT_BUFLEN 4096
-
-struct option_port_private {
- /* Input endpoints and buffer for this port */
- struct urb *in_urbs[N_IN_URB];
- u8 *in_buffer[N_IN_URB];
- /* Output endpoints and buffer for this port */
- struct urb *out_urbs[N_OUT_URB];
- u8 *out_buffer[N_OUT_URB];
- unsigned long out_busy; /* Bit vector of URBs in use */
- int opened;
- struct usb_anchor delayed;
-
- /* Settings for the port */
- int rts_state; /* Handshaking pins (outputs) */
- int dtr_state;
- int cts_state; /* Handshaking pins (inputs) */
- int dsr_state;
- int dcd_state;
- int ri_state;
-
- unsigned long tx_start_time[N_OUT_URB];
-};
-
module_usb_serial_driver(serial_drivers, option_ids);
static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
@@ -1356,12 +1352,22 @@ static int option_probe(struct usb_serial *serial,
return 0;
}
+static void option_release(struct usb_serial *serial)
+{
+ struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+ usb_wwan_release(serial);
+
+ kfree(priv);
+}
+
static void option_instat_callback(struct urb *urb)
{
int err;
int status = urb->status;
struct usb_serial_port *port = urb->context;
- struct option_port_private *portdata = usb_get_serial_port_data(port);
+ struct usb_wwan_port_private *portdata =
+ usb_get_serial_port_data(port);
dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
@@ -1421,7 +1427,7 @@ static int option_send_setup(struct usb_serial_port *port)
struct usb_serial *serial = port->serial;
struct usb_wwan_intf_private *intfdata =
(struct usb_wwan_intf_private *) serial->private;
- struct option_port_private *portdata;
+ struct usb_wwan_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
int val = 0;
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 0d5fe59..996015c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
{USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
{USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
+ {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+ {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
{USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
{ } /* Terminating entry */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ba54a0a..d423d36 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -294,6 +294,10 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ /* AT&T Direct IP LTE modems */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
{ USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6a1b609..27483f9 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -659,12 +659,14 @@ exit:
static struct usb_serial_driver *search_serial_device(
struct usb_interface *iface)
{
- const struct usb_device_id *id;
+ const struct usb_device_id *id = NULL;
struct usb_serial_driver *drv;
+ struct usb_driver *driver = to_usb_driver(iface->dev.driver);
/* Check if the usb id matches a known device */
list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
- id = get_iface_id(drv, iface);
+ if (drv->usb_driver == driver)
+ id = get_iface_id(drv, iface);
if (id)
return drv;
}
@@ -755,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface,
if (retval) {
dbg("sub driver rejected device");
- kfree(serial);
+ usb_serial_put(serial);
module_put(type->driver.owner);
return retval;
}
@@ -827,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface,
*/
if (num_bulk_in == 0 || num_bulk_out == 0) {
dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
- kfree(serial);
+ usb_serial_put(serial);
module_put(type->driver.owner);
return -ENODEV;
}
@@ -841,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface,
if (num_ports == 0) {
dev_err(&interface->dev,
"Generic device with no bulk out, not allowed.\n");
- kfree(serial);
+ usb_serial_put(serial);
module_put(type->driver.owner);
return -EIO;
}
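The probe error paths above switch from kfree() to usb_serial_put() because the serial structure is reference counted once allocated, and a bare free would bypass its release path. The sketch below uses a toy refcount, not the kernel's kref, purely to show why an error path drops its reference with a put instead of freeing the object directly.

/*
 * Toy refcount illustrating put-versus-free on an error path. The release
 * work runs only when the last reference is dropped.
 */
#include <stdio.h>
#include <stdlib.h>

struct serial {
        int refcount;
};

static struct serial *serial_create(void)
{
        struct serial *s = calloc(1, sizeof(*s));
        if (s)
                s->refcount = 1;        /* creator holds the first reference */
        return s;
}

static void serial_put(struct serial *s)
{
        if (--s->refcount == 0) {
                printf("last reference dropped, releasing\n");
                free(s);
        }
}

int main(void)
{
        struct serial *s = serial_create();

        if (!s)
                return 1;
        /* probe failed: drop our reference instead of calling free(s) */
        serial_put(s);
        return 0;
}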
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index a324a5d..11418da 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -202,6 +202,12 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_NO_READ_CAPACITY_16)
sdev->no_read_capacity_16 = 1;
+ /*
+ * Many devices do not respond properly to READ_CAPACITY_16.
+ * Tell the SCSI layer to try READ_CAPACITY_10 first.
+ */
+ sdev->try_rc_10_first = 1;
+
/* assume SPC3 or latter devices support sense size > 18 */
if (sdev->scsi_level > SCSI_SPC_2)
us->fflags |= US_FL_SANE_SENSE;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 94dbd25..112156f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -191,7 +191,9 @@ static int vhost_worker(void *data)
struct vhost_dev *dev = data;
struct vhost_work *work = NULL;
unsigned uninitialized_var(seq);
+ mm_segment_t oldfs = get_fs();
+ set_fs(USER_DS);
use_mm(dev->mm);
for (;;) {
@@ -229,6 +231,7 @@ static int vhost_worker(void *data)
}
unuse_mm(dev->mm);
+ set_fs(oldfs);
return 0;
}
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index fa2b037..2979292 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -88,7 +88,7 @@ config LCD_PLATFORM
config LCD_TOSA
tristate "Sharp SL-6000 LCD Driver"
- depends on SPI && MACH_TOSA
+ depends on I2C && SPI && MACH_TOSA
help
If you have an Sharp SL-6000 Zaurus say Y to enable a driver
for its LCD.
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 6c93993..9327cd1 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -263,7 +263,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
EXPORT_SYMBOL_GPL(ili9320_probe_spi);
-int __devexit ili9320_remove(struct ili9320 *ili)
+int ili9320_remove(struct ili9320 *ili)
{
ili9320_power(ili, FB_BLANK_POWERDOWN);
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index 33ea874..9bdd4b0 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off,
static int
adv7393_write_proc(struct file *file, const char __user * buffer,
- unsigned long count, void *data)
+ size_t count, void *data)
{
struct adv7393fb_device *fbdev = data;
- char line[8];
unsigned int val;
int ret;
- ret = copy_from_user(line, buffer, count);
+ ret = kstrtouint_from_user(buffer, count, 0, &val);
if (ret)
return -EFAULT;
- val = simple_strtoul(line, NULL, 0);
adv7393_write(fbdev->client, val >> 8, val & 0xff);
return count;
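The hunk above replaces copy_from_user() into a small stack buffer plus simple_strtoul() with kstrtouint_from_user(), which bounds the copy and rejects malformed input in one step. The userspace sketch below approximates that strict parse with strtoul() and explicit error checks, since the kernel helper is unavailable here; details such as trailing-newline handling are deliberately left out.

/*
 * Userspace analogue of a strict unsigned parse: overflow, empty input and
 * trailing junk are all rejected instead of being silently accepted.
 */
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_uint(const char *s, unsigned int *out)
{
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(s, &end, 0);
        if (errno || end == s || *end != '\0' || val > UINT_MAX)
                return -EINVAL;
        *out = (unsigned int)val;
        return 0;
}

int main(void)
{
        unsigned int val;

        if (parse_uint("0x1a2b", &val) == 0)
                printf("high byte 0x%x, low byte 0x%x\n", val >> 8, val & 0xff);
        return 0;
}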
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index 377dde3..c95b417 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev)
static struct platform_driver broadsheetfb_driver = {
.probe = broadsheetfb_probe,
- .remove = broadsheetfb_remove,
+ .remove = __devexit_p(broadsheetfb_remove),
.driver = {
.owner = THIS_MODULE,
.name = "broadsheetfb",
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index c2d11fe..e2c96d0 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -224,5 +224,19 @@ config FONT_10x18
big letters. It fits between the sun 12x22 and the normal 8x16 font.
If other fonts are too big or too small for you, say Y, otherwise say N.
+config FONT_AUTOSELECT
+ def_bool y
+ depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
+ depends on !FONT_8x8
+ depends on !FONT_6x11
+ depends on !FONT_7x14
+ depends on !FONT_PEARL_8x8
+ depends on !FONT_ACORN_8x8
+ depends on !FONT_MINI_4x6
+ depends on !FONT_SUN8x16
+ depends on !FONT_SUN12x22
+ depends on !FONT_10x18
+ select FONT_8x16
+
endmenu
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index ab0a8e5..85e4f44 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev)
static struct platform_driver mbxfb_driver = {
.probe = mbxfb_probe,
- .remove = mbxfb_remove,
+ .remove = __devexit_p(mbxfb_remove),
.suspend = mbxfb_suspend,
.resume = mbxfb_resume,
.driver = {
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 2ce9992..901576e 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -526,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev,
{
struct omap_dss_device *dssdev = to_dss_device(dev);
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- u8 errors;
+ u8 errors = 0;
int r;
mutex_lock(&td->lock);
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 72ded9c..5066eee 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -194,8 +194,7 @@ static inline int dss_initialize_debugfs(void)
static inline void dss_uninitialize_debugfs(void)
{
}
-static inline int dss_debugfs_create_file(const char *name,
- void (*write)(struct seq_file *))
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
{
return 0;
}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index ec363d8..ca8382d 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -3724,7 +3724,7 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
/* CLKIN4DDR = 16 * TXBYTECLKHS */
tlp_avail = thsbyte_clk * (blank - trans_lp);
- ttxclkesc = tdsi_fclk / lp_clk_div;
+ ttxclkesc = tdsi_fclk * lp_clk_div;
lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
26) / 16;
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 6ea1ff1..7706323 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -731,7 +731,7 @@ static void dss_runtime_put(void)
DSSDBG("dss_runtime_put\n");
r = pm_runtime_put_sync(&dss.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -EBUSY);
}
/* DEBUGFS */
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 5f9d8e690..ea7b661 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
result = (unsigned int)tmp / 1000;
dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
- pixclk, clk, result, clk / result);
+ pixclk, clk, result, result ? clk / result : clk);
return result;
}
@@ -1348,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
writel(0, regs + VIDOSD_A(win, sfb->variant));
writel(0, regs + VIDOSD_B(win, sfb->variant));
writel(0, regs + VIDOSD_C(win, sfb->variant));
- reg = readl(regs + SHADOWCON);
- writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON);
+
+ if (sfb->variant.has_shadowcon) {
+ reg = readl(sfb->regs + SHADOWCON);
+ reg &= ~(SHADOWCON_WINx_PROTECT(win) |
+ SHADOWCON_CHx_ENABLE(win) |
+ SHADOWCON_CHx_LOCAL_ENABLE(win));
+ writel(reg, sfb->regs + SHADOWCON);
+ }
}
static int __devinit s3c_fb_probe(struct platform_device *pdev)
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index cee7803..f3d3b9c 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r
/* following part not present in X11 driver */
cr67 = vga_in8(0x3d5, par) & 0xf;
vga_out8(0x3d5, 0x50 | cr67, par);
- udelay(10000);
+ mdelay(10);
vga_out8(0x3d4, 0x67, par);
/* end of part */
vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
@@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par)
vga_out8(0x3d4, 0x66, par);
cr66 = vga_in8(0x3d5, par);
vga_out8(0x3d5, cr66 | 0x02, par);
- udelay(10000);
+ mdelay(10);
vga_out8(0x3d4, 0x66, par);
vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */
- udelay(10000);
+ mdelay(10);
/*
@@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par)
vga_out8(0x3d4, 0x3f, par);
cr3f = vga_in8(0x3d5, par);
vga_out8(0x3d5, cr3f | 0x08, par);
- udelay(10000);
+ mdelay(10);
vga_out8(0x3d4, 0x3f, par);
vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */
- udelay(10000);
+ mdelay(10);
/* Savage ramdac speeds */
par->numClocks = 4;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 2b76381..1eff743 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -146,7 +146,7 @@ struct cmn_registers {
} __attribute__((packed));
static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump;
+static unsigned int allow_kdump = 1;
static unsigned int is_icru;
static DEFINE_SPINLOCK(rom_lock);
static void *cru_rom_addr;
@@ -756,6 +756,8 @@ error:
static void hpwdt_exit_nmi_decoding(void)
{
unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
+ unregister_nmi_handler(NMI_SERR, "hpwdt");
+ unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
if (cru_rom_addr)
iounmap(cru_rom_addr);
}
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index bc47e90..9c2c27c 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -699,3 +699,4 @@ MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index afcd136..e4841c3 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
* Watchdog driver for ARM SP805 watchdog module
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2 or later. This program is licensed "as is" without any
@@ -331,6 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
module_amba_driver(sp805_wdt_driver);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 672d169..ef8edec 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -349,7 +349,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
sizeof(struct watchdog_info)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
err = watchdog_get_status(wdd, &val);
- if (err)
+ if (err == -ENODEV)
return err;
return put_user(val, p);
case WDIOC_GETBOOTSTATUS:
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6908e4c..7595581 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -827,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn)
handle_edge_irq, "event");
xen_irq_info_evtchn_init(irq, evtchn);
+ } else {
+ struct irq_info *info = info_for_irq(irq);
+ WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
}
out:
@@ -862,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
+ } else {
+ struct irq_info *info = info_for_irq(irq);
+ WARN_ON(info == NULL || info->type != IRQT_IPI);
}
out:
@@ -939,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
xen_irq_info_virq_init(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
+ } else {
+ struct irq_info *info = info_for_irq(irq);
+ WARN_ON(info == NULL || info->type != IRQT_VIRQ);
}
out:
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index b84bf0b..18fff88 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev)
#ifdef CONFIG_ACPI
handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
- if (!handle)
+ if (!handle && pci_dev->bus->bridge)
handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
#ifdef CONFIG_PCI_IOV
if (!handle && pci_dev->is_virtfn)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index dcb79521..89f264c 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
}
/* returns 0 if the page was successfully put into frontswap, -1 if not */
-static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_store(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!)
*/
-static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_load(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s)
__setup("nofrontswap", no_frontswap);
static struct frontswap_ops __initdata tmem_frontswap_ops = {
- .put_page = tmem_frontswap_put_page,
- .get_page = tmem_frontswap_get_page,
+ .store = tmem_frontswap_store,
+ .load = tmem_frontswap_load,
.invalidate_page = tmem_frontswap_flush_page,
.invalidate_area = tmem_frontswap_flush_area,
.init = tmem_frontswap_init
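The tmem hunks above rename the frontswap callbacks from put_page/get_page to store/load and rewire the ops structure with designated initializers to match. The sketch below shows the same ops-table shape with a toy backend_ops struct; the names and signatures are assumptions for illustration, not the real frontswap interface.

/*
 * Toy ops table with store/load function pointers, wired up with designated
 * initializers. The callbacks and return conventions are stand-ins only.
 */
#include <stdio.h>

struct backend_ops {
        int (*store)(unsigned type, unsigned long offset, const void *page);
        int (*load)(unsigned type, unsigned long offset, void *page);
};

static int my_store(unsigned type, unsigned long offset, const void *page)
{
        printf("store type=%u offset=%lu\n", type, offset);
        return 0;       /* 0: page accepted by the backend */
}

static int my_load(unsigned type, unsigned long offset, void *page)
{
        printf("load type=%u offset=%lu\n", type, offset);
        return -1;      /* -1: page not present */
}

static const struct backend_ops ops = {
        .store = my_store,
        .load = my_load,
};

int main(void)
{
        char page[4096];

        ops.store(0, 42, page);
        ops.load(0, 42, page);
        return 0;
}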