path: root/drivers
author    James Bottomley <JBottomley@Parallels.com>  2012-05-21 12:17:30 +0100
committer James Bottomley <JBottomley@Parallels.com>  2012-05-21 12:17:30 +0100
commit    e34693336564f02b3e2cc09d8b872aef22a154e9 (patch)
tree      09f51f10f9406042f9176e39b4dc8de850ba712e /drivers
parent    76b311fdbdd2e16e5d39cd496a67aa1a1b948914 (diff)
parent    de2eb4d5c5c25e8fb75d1e19092f24b83cb7d8d5 (diff)
download  op-kernel-dev-e34693336564f02b3e2cc09d8b872aef22a154e9.zip
          op-kernel-dev-e34693336564f02b3e2cc09d8b872aef22a154e9.tar.gz
Merge tag 'isci-for-3.5' into misc
isci update for 3.5

1/ Rework remote-node-context (RNC) handling for proper management of the silicon state machine in error handling and hot-plug conditions. Further details below; suffice it to say that if the RNC is mismanaged the silicon state machines may lock up.

2/ Refactor the initialization code to be reused for suspend/resume support.

3/ Miscellaneous bug fixes to address discovery issues and hardware compatibility.

RNC rework details from Jeff Skirvin:

In the controller, devices as they appear on a SAS domain (or direct-attached SATA devices) are represented by memory structures known as "Remote Node Contexts" (RNCs). These structures are transferred from main memory to the controller using a set of register commands; these commands include setting up the context ("posting"), removing the context ("invalidating"), and commands to control the scheduling of commands and connections to that remote device ("suspensions" and "resumptions"). There is a similar path to control RNC scheduling from the protocol engine, which interprets the results of command and data transmission and reception.

In general, the controller chooses among non-suspended RNCs to find one that has work requiring scheduling the transmission of command and data frames to a target. Likewise, when a target tries to return data back to the initiator, the state of the RNC is used by the controller to determine how to treat the incoming request. As an example, if the RNC is in the state "TX/RX Suspended", incoming SSP connection requests from the target will be rejected by the controller hardware. When an RNC is "TX Suspended", it will not be selected by the controller hardware to start outgoing command or data operations (with certain priority-based exceptions).

As mentioned above, there are two sources for management of the RNC states: commands from driver software, and the result of transmission and reception conditions of commands and data signaled by the controller hardware. As an example of the latter, if an outgoing SSP command ends with an OPEN_REJECT(BAD_DESTINATION) status, the RNC state will transition to the "TX Suspended" state; this is signaled by the controller hardware in the status of the completion of the pending command, as well as in a controller hardware event. Examples of the former are included in the patch changelogs.

Driver software is required to suspend the RNC into a "TX/RX Suspended" condition before any outstanding commands can be terminated. Failure to guarantee this can lead to a complete hardware hang condition. Earlier versions of the driver software did not guarantee that an RNC was correctly managed before I/O termination, and so operated in an unsafe way. Further, the driver performed unnecessary contortions to preserve the remote device command state and so was more complicated than it needed to be.

A simplifying driver assumption is that once an I/O has entered the error handler path without having completed in the target, the requirement on the driver is that all use of the sas_task must end. Beyond that, recovery of operation is dependent on libsas and other components to reset, rediscover and reconfigure the device before normal operation can restart. In the driver, this simplifying assumption meant that the RNC management could be reduced to entry into the suspended state, terminating the targeted I/O request, and resuming the RNC as needed for device-specific management such as an SSP Abort Task or LUN Reset Management request.
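For readers unfamiliar with the flow described above, the following is a minimal, self-contained C sketch of the simplified ordering constraint (suspend the RNC, then terminate the outstanding request, then resume only for device-specific management). The state names and helper functions are illustrative stand-ins, not the actual isci driver or SCU hardware API.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative RNC states -- not the hardware encoding used by the SCU. */
enum rnc_state {
	RNC_READY,
	RNC_TX_SUSPENDED,
	RNC_TX_RX_SUSPENDED,
};

struct rnc {
	enum rnc_state state;
	int outstanding_ios;
};

/* Model of asking the hardware to stop scheduling this remote node. */
static void rnc_suspend_tx_rx(struct rnc *rnc)
{
	rnc->state = RNC_TX_RX_SUSPENDED;
}

/* Terminating an outstanding I/O is only safe once the RNC is suspended. */
static bool rnc_terminate_io(struct rnc *rnc)
{
	if (rnc->state != RNC_TX_RX_SUSPENDED) {
		fprintf(stderr, "refusing to terminate: RNC not suspended\n");
		return false;
	}
	if (rnc->outstanding_ios > 0)
		rnc->outstanding_ios--;
	return true;
}

/* Resume only for device-specific management (e.g. Abort Task, LUN Reset). */
static void rnc_resume(struct rnc *rnc)
{
	rnc->state = RNC_READY;
}

int main(void)
{
	struct rnc rnc = { .state = RNC_READY, .outstanding_ios = 1 };

	rnc_suspend_tx_rx(&rnc);          /* 1. enter the suspended state   */
	if (rnc_terminate_io(&rnc))       /* 2. terminate the targeted I/O  */
		printf("I/O terminated, %d outstanding\n", rnc.outstanding_ios);
	rnc_resume(&rnc);                 /* 3. resume for task management  */

	return 0;
}

The only point of the sketch is the ordering: rnc_terminate_io() refuses to run unless the suspend step has completed, mirroring the hardware requirement described in the tag message.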
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpica/hwxface.c | 3
-rw-r--r-- drivers/acpi/power.c | 2
-rw-r--r-- drivers/acpi/reboot.c | 3
-rw-r--r-- drivers/acpi/scan.c | 17
-rw-r--r-- drivers/acpi/sleep.c | 52
-rw-r--r-- drivers/ata/ahci.c | 2
-rw-r--r-- drivers/ata/ahci_platform.c | 1
-rw-r--r-- drivers/ata/libata-core.c | 2
-rw-r--r-- drivers/ata/libata-eh.c | 3
-rw-r--r-- drivers/ata/libata-scsi.c | 38
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 4
-rw-r--r-- drivers/base/regmap/regmap.c | 4
-rw-r--r-- drivers/bcma/sprom.c | 7
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 2
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 2
-rw-r--r-- drivers/bluetooth/ath3k.c | 4
-rw-r--r-- drivers/bluetooth/btusb.c | 6
-rw-r--r-- drivers/crypto/ixp4xx_crypto.c | 1
-rw-r--r-- drivers/crypto/talitos.c | 20
-rw-r--r-- drivers/dma/Kconfig | 5
-rw-r--r-- drivers/dma/amba-pl08x.c | 1
-rw-r--r-- drivers/dma/at_hdmac.c | 4
-rw-r--r-- drivers/dma/imx-dma.c | 9
-rw-r--r-- drivers/dma/mxs-dma.c | 10
-rw-r--r-- drivers/dma/pl330.c | 25
-rw-r--r-- drivers/dma/ste_dma40.c | 323
-rw-r--r-- drivers/dma/ste_dma40_ll.h | 2
-rw-r--r-- drivers/firmware/efivars.c | 196
-rw-r--r-- drivers/gpio/gpio-omap.c | 9
-rw-r--r-- drivers/gpio/gpio-pch.c | 57
-rw-r--r-- drivers/gpio/gpio-pxa.c | 21
-rw-r--r-- drivers/gpio/gpio-samsung.c | 18
-rw-r--r-- drivers/gpu/drm/drm_bufs.c | 12
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c | 34
-rw-r--r-- drivers/gpu/drm/i810/i810_dma.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 3
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 15
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 9
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 40
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_acpi.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_hdmi.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_i2c.c | 199
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_i2c.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nv10_gpio.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_fb.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 3
-rw-r--r-- drivers/hid/Kconfig | 2
-rw-r--r-- drivers/hid/hid-tivo.c | 2
-rw-r--r-- drivers/hsi/clients/hsi_char.c | 2
-rw-r--r-- drivers/hsi/hsi.c | 223
-rw-r--r-- drivers/hwmon/ad7314.c | 12
-rw-r--r-- drivers/hwmon/ads1015.c | 33
-rw-r--r-- drivers/hwmon/coretemp.c | 6
-rw-r--r-- drivers/hwmon/fam15h_power.c | 42
-rw-r--r-- drivers/i2c/busses/i2c-eg20t.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-mxs.c | 8
-rw-r--r-- drivers/i2c/busses/i2c-pnx.c | 3
-rw-r--r-- drivers/i2c/busses/i2c-tegra.c | 8
-rw-r--r-- drivers/infiniband/core/mad.c | 8
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 2
-rw-r--r-- drivers/input/misc/Kconfig | 3
-rw-r--r-- drivers/input/misc/twl6040-vibra.c | 4
-rw-r--r-- drivers/input/mouse/synaptics.c | 3
-rw-r--r-- drivers/leds/leds-atmel-pwm.c | 2
-rw-r--r-- drivers/leds/leds-netxbig.c | 4
-rw-r--r-- drivers/leds/leds-ns2.c | 2
-rw-r--r-- drivers/md/bitmap.c | 3
-rw-r--r-- drivers/md/bitmap.h | 3
-rw-r--r-- drivers/md/dm-log-userspace-transfer.c | 2
-rw-r--r-- drivers/md/dm-mpath.c | 4
-rw-r--r-- drivers/md/dm-raid.c | 4
-rw-r--r-- drivers/md/dm-thin.c | 16
-rw-r--r-- drivers/md/md.c | 7
-rw-r--r-- drivers/media/common/tuners/xc5000.c | 39
-rw-r--r-- drivers/media/common/tuners/xc5000.h | 1
-rw-r--r-- drivers/media/dvb/dvb-core/dvb_frontend.c | 25
-rw-r--r-- drivers/media/dvb/frontends/drxk_hard.c | 6
-rw-r--r-- drivers/media/rc/winbond-cir.c | 1
-rw-r--r-- drivers/media/video/Kconfig | 2
-rw-r--r-- drivers/media/video/mt9m032.c | 5
-rw-r--r-- drivers/mfd/Kconfig | 11
-rw-r--r-- drivers/mfd/asic3.c | 4
-rw-r--r-- drivers/mfd/omap-usb-host.c | 45
-rw-r--r-- drivers/mfd/rc5t583.c | 39
-rw-r--r-- drivers/mfd/twl6040-core.c | 114
-rw-r--r-- drivers/mmc/card/block.c | 56
-rw-r--r-- drivers/mmc/card/queue.c | 2
-rw-r--r-- drivers/mmc/core/bus.c | 24
-rw-r--r-- drivers/mmc/core/cd-gpio.c | 1
-rw-r--r-- drivers/mmc/core/core.c | 64
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 7
-rw-r--r-- drivers/mmc/host/mxs-mmc.c | 3
-rw-r--r-- drivers/mmc/host/omap_hsmmc.c | 6
-rw-r--r-- drivers/mmc/host/sdhci-esdhc-imx.c | 3
-rw-r--r-- drivers/mmc/host/sdhci.c | 2
-rw-r--r-- drivers/mtd/nand/gpmi-nand/gpmi-nand.c | 1
-rw-r--r-- drivers/net/arcnet/arc-rimi.c | 8
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 18
-rw-r--r-- drivers/net/bonding/bond_3ad.h | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 16
-rw-r--r-- drivers/net/caif/caif_hsi.c | 9
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 2
-rw-r--r-- drivers/net/dummy.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 12
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.h | 3
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atlx.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 23
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 18
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 92
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 52
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.h | 7
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.h | 2
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 62
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_phyp.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 15
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/param.c | 99
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 24
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 43
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 39
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 31
-rw-r--r-- drivers/net/ethernet/marvell/sky2.h | 1
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 28
-rw-r--r-- drivers/net/ethernet/micrel/ks8851_mll.c | 2
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/8139cp.c | 10
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 16
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 17
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 5
-rw-r--r-- drivers/net/ethernet/ti/tlan.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet.h | 4
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 6
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 38
-rw-r--r-- drivers/net/macvlan.c | 2
-rw-r--r-- drivers/net/macvtap.c | 43
-rw-r--r-- drivers/net/phy/icplus.c | 12
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 15
-rw-r--r-- drivers/net/usb/asix.c | 4
-rw-r--r-- drivers/net/usb/cdc_ether.c | 14
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 30
-rw-r--r-- drivers/net/usb/smsc75xx.c | 36
-rw-r--r-- drivers/net/usb/smsc95xx.c | 3
-rw-r--r-- drivers/net/usb/usbnet.c | 5
-rw-r--r-- drivers/net/virtio_net.c | 5
-rw-r--r-- drivers/net/wan/farsync.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath5k/ahb.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar5008_phy.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_paprd.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_phy.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath9k/eeprom_9287.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath9k/xmit.c | 10
-rw-r--r-- drivers/net/wireless/b43/main.c | 10
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c | 8
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 64
-rw-r--r-- drivers/net/wireless/brcm80211/brcmsmac/main.c | 11
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2200.c | 13
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-1000.c | 8
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-2000.c | 16
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-5000.c | 11
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-6000.c | 10
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn-rx.c | 21
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-agn.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-fh.h | 24
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-mac80211.c | 10
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-prph.h | 27
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-trans.h | 1
-rw-r--r-- drivers/net/wireless/libertas/cfg.c | 9
-rw-r--r-- drivers/net/wireless/mwifiex/pcie.h | 18
-rw-r--r-- drivers/net/wireless/rtlwifi/pci.c | 1
-rw-r--r-- drivers/net/wireless/wl1251/main.c | 1
-rw-r--r-- drivers/net/wireless/wl1251/sdio.c | 2
-rw-r--r-- drivers/parisc/sba_iommu.c | 1
-rw-r--r-- drivers/pci/Makefile | 1
-rw-r--r-- drivers/pci/pci-acpi.c | 4
-rw-r--r-- drivers/pinctrl/core.c | 25
-rw-r--r-- drivers/platform/x86/acerhdf.c | 67
-rw-r--r-- drivers/platform/x86/dell-laptop.c | 1
-rw-r--r-- drivers/platform/x86/intel_ips.c | 2
-rw-r--r-- drivers/platform/x86/intel_mid_powerbtn.c | 2
-rw-r--r-- drivers/regulator/core.c | 5
-rw-r--r-- drivers/regulator/max8997.c | 2
-rw-r--r-- drivers/rtc/rtc-ds1307.c | 1
-rw-r--r-- drivers/rtc/rtc-mpc5121.c | 3
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 6
-rw-r--r-- drivers/scsi/hosts.c | 3
-rw-r--r-- drivers/scsi/ipr.c | 6
-rw-r--r-- drivers/scsi/isci/host.c | 703
-rw-r--r-- drivers/scsi/isci/host.h | 124
-rw-r--r-- drivers/scsi/isci/init.c | 212
-rw-r--r-- drivers/scsi/isci/phy.c | 76
-rw-r--r-- drivers/scsi/isci/phy.h | 9
-rw-r--r-- drivers/scsi/isci/port.c | 68
-rw-r--r-- drivers/scsi/isci/port.h | 11
-rw-r--r-- drivers/scsi/isci/port_config.c | 18
-rw-r--r-- drivers/scsi/isci/probe_roms.c | 12
-rw-r--r-- drivers/scsi/isci/probe_roms.h | 2
-rw-r--r-- drivers/scsi/isci/registers.h | 8
-rw-r--r-- drivers/scsi/isci/remote_device.c | 576
-rw-r--r-- drivers/scsi/isci/remote_device.h | 63
-rw-r--r-- drivers/scsi/isci/remote_node_context.c | 393
-rw-r--r-- drivers/scsi/isci/remote_node_context.h | 43
-rw-r--r-- drivers/scsi/isci/request.c | 715
-rw-r--r-- drivers/scsi/isci/request.h | 125
-rw-r--r-- drivers/scsi/isci/scu_completion_codes.h | 2
-rw-r--r-- drivers/scsi/isci/task.c | 800
-rw-r--r-- drivers/scsi/isci/task.h | 132
-rw-r--r-- drivers/scsi/isci/unsolicited_frame_control.c | 30
-rw-r--r-- drivers/scsi/isci/unsolicited_frame_control.h | 6
-rw-r--r-- drivers/scsi/libfc/fc_lport.c | 12
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 33
-rw-r--r-- drivers/scsi/libsas/sas_discover.c | 61
-rw-r--r-- drivers/scsi/libsas/sas_event.c | 24
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 56
-rw-r--r-- drivers/scsi/libsas/sas_init.c | 11
-rw-r--r-- drivers/scsi/libsas/sas_internal.h | 6
-rw-r--r-- drivers/scsi/libsas/sas_phy.c | 21
-rw-r--r-- drivers/scsi/libsas/sas_port.c | 17
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 15
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 18
-rw-r--r-- drivers/scsi/qla2xxx/qla_sup.c | 3
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r-- drivers/scsi/scsi_lib.c | 2
-rw-r--r-- drivers/scsi/virtio_scsi.c | 24
-rw-r--r-- drivers/spi/Kconfig | 2
-rw-r--r-- drivers/spi/Makefile | 2
-rw-r--r-- drivers/spi/spi-bcm63xx.c | 163
-rw-r--r-- drivers/spi/spi-bfin-sport.c | 21
-rw-r--r-- drivers/spi/spi-bfin5xx.c | 14
-rw-r--r-- drivers/spi/spi-ep93xx.c | 24
-rw-r--r-- drivers/spi/spi-pl022.c | 58
-rw-r--r-- drivers/staging/octeon/ethernet-rx.c | 1
-rw-r--r-- drivers/staging/octeon/ethernet-tx.c | 1
-rw-r--r-- drivers/staging/octeon/ethernet.c | 1
-rw-r--r-- drivers/staging/ozwpan/ozpd.c | 2
-rw-r--r-- drivers/staging/tidspbridge/core/tiomap3430.c | 20
-rw-r--r-- drivers/staging/tidspbridge/core/wdt.c | 8
-rw-r--r-- drivers/staging/zcache/Kconfig | 2
-rw-r--r-- drivers/target/target_core_tpg.c | 22
-rw-r--r-- drivers/tty/amiserial.c | 4
-rw-r--r-- drivers/tty/serial/clps711x.c | 14
-rw-r--r-- drivers/tty/serial/pch_uart.c | 4
-rw-r--r-- drivers/tty/serial/pmac_zilog.c | 6
-rw-r--r-- drivers/tty/vt/keyboard.c | 26
-rw-r--r-- drivers/usb/class/cdc-wdm.c | 7
-rw-r--r-- drivers/usb/core/hcd-pci.c | 9
-rw-r--r-- drivers/usb/core/hub.c | 3
-rw-r--r-- drivers/usb/core/message.c | 6
-rw-r--r-- drivers/usb/dwc3/core.c | 6
-rw-r--r-- drivers/usb/dwc3/ep0.c | 12
-rw-r--r-- drivers/usb/gadget/at91_udc.c | 8
-rw-r--r-- drivers/usb/gadget/dummy_hcd.c | 1
-rw-r--r-- drivers/usb/gadget/f_fs.c | 3
-rw-r--r-- drivers/usb/gadget/f_mass_storage.c | 2
-rw-r--r-- drivers/usb/gadget/f_rndis.c | 1
-rw-r--r-- drivers/usb/gadget/file_storage.c | 2
-rw-r--r-- drivers/usb/gadget/fsl_udc_core.c | 25
-rw-r--r-- drivers/usb/gadget/g_ffs.c | 4
-rw-r--r-- drivers/usb/gadget/s3c-hsotg.c | 17
-rw-r--r-- drivers/usb/gadget/udc-core.c | 6
-rw-r--r-- drivers/usb/gadget/uvc.h | 2
-rw-r--r-- drivers/usb/gadget/uvc_queue.c | 4
-rw-r--r-- drivers/usb/gadget/uvc_v4l2.c | 2
-rw-r--r-- drivers/usb/host/ehci-fsl.c | 7
-rw-r--r-- drivers/usb/host/ehci-hcd.c | 9
-rw-r--r-- drivers/usb/host/ehci-omap.c | 39
-rw-r--r-- drivers/usb/host/ehci-pci.c | 8
-rw-r--r-- drivers/usb/host/ehci-tegra.c | 377
-rw-r--r-- drivers/usb/host/ohci-at91.c | 12
-rw-r--r-- drivers/usb/misc/usbtest.c | 9
-rw-r--r-- drivers/usb/misc/yurex.c | 10
-rw-r--r-- drivers/usb/musb/davinci.c | 3
-rw-r--r-- drivers/usb/musb/musb_core.c | 40
-rw-r--r-- drivers/usb/musb/musb_core.h | 2
-rw-r--r-- drivers/usb/musb/musb_host.c | 2
-rw-r--r-- drivers/usb/musb/omap2430.c | 31
-rw-r--r-- drivers/usb/otg/gpio_vbus.c | 15
-rw-r--r-- drivers/usb/serial/cp210x.c | 9
-rw-r--r-- drivers/usb/serial/sierra.c | 6
-rw-r--r-- drivers/uwb/hwa-rc.c | 3
-rw-r--r-- drivers/uwb/neh.c | 12
-rw-r--r-- drivers/vhost/net.c | 9
-rw-r--r-- drivers/vhost/vhost.c | 5
-rw-r--r-- drivers/vhost/vhost.h | 2
-rw-r--r-- drivers/video/bfin-lq035q1-fb.c | 1
-rw-r--r-- drivers/video/console/sticore.c | 2
-rw-r--r-- drivers/video/uvesafb.c | 2
-rw-r--r-- drivers/video/xen-fbfront.c | 27
-rw-r--r-- drivers/watchdog/hpwdt.c | 6
-rw-r--r-- drivers/xen/Kconfig | 22
-rw-r--r-- drivers/xen/events.c | 2
-rw-r--r-- drivers/xen/gntdev.c | 2
-rw-r--r-- drivers/xen/grant-table.c | 13
-rw-r--r-- drivers/xen/manage.c | 1
-rw-r--r-- drivers/xen/xen-acpi-processor.c | 5
-rw-r--r-- drivers/xen/xenbus/xenbus_probe_frontend.c | 69
324 files changed, 4907 insertions, 4349 deletions
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index ab513a9..a716fed 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -74,7 +74,8 @@ acpi_status acpi_reset(void)
/* Check if the reset register is supported */
- if (!reset_reg->address) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
+ !reset_reg->address) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 7049a7d..330bb4d 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
* We know a device's inferred power state when all the resources
* required for a given D-state are 'on'.
*/
- for (i = ACPI_STATE_D0; i < ACPI_STATE_D3; i++) {
+ for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
list = &device->power.states[i].resources;
if (list->count < 1)
continue;
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index c1d6124..a6c77e8b 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -23,7 +23,8 @@ void acpi_reboot(void)
/* Is the reset register supported? The spec says we should be
* checking the bit width and bit offset, but Windows ignores
* these fields */
- /* Ignore also acpi_gbl_FADT.flags.ACPI_FADT_RESET_REGISTER */
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER))
+ return;
reset_value = acpi_gbl_FADT.reset_value;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 767e2dc..7417267 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -869,7 +869,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
/*
* Enumerate supported power management states
*/
- for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) {
+ for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
struct acpi_device_power_state *ps = &device->power.states[i];
char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
@@ -884,21 +884,18 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
acpi_bus_add_power_resource(ps->resources.handles[j]);
}
- /* The exist of _PR3 indicates D3Cold support */
- if (i == ACPI_STATE_D3) {
- status = acpi_get_handle(device->handle, object_name, &handle);
- if (ACPI_SUCCESS(status))
- device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
- }
-
/* Evaluate "_PSx" to see if we can do explicit sets */
object_name[2] = 'S';
status = acpi_get_handle(device->handle, object_name, &handle);
if (ACPI_SUCCESS(status))
ps->flags.explicit_set = 1;
- /* State is valid if we have some power control */
- if (ps->resources.count || ps->flags.explicit_set)
+ /*
+ * State is valid if there are means to put the device into it.
+ * D3hot is only valid if _PR3 present.
+ */
+ if (ps->resources.count ||
+ (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT))
ps->flags.valid = 1;
ps->power = -1; /* Unknown - driver assigned */
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 1d661b5..eb6fd23 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -28,23 +28,33 @@
#include "internal.h"
#include "sleep.h"
+u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS;
static unsigned int gts, bfs;
-module_param(gts, uint, 0644);
-module_param(bfs, uint, 0644);
-MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
-MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
-
-static u8 wake_sleep_flags(void)
+static int set_param_wake_flag(const char *val, struct kernel_param *kp)
{
- u8 flags = ACPI_NO_OPTIONAL_METHODS;
+ int ret = param_set_int(val, kp);
- if (gts)
- flags |= ACPI_EXECUTE_GTS;
- if (bfs)
- flags |= ACPI_EXECUTE_BFS;
+ if (ret)
+ return ret;
- return flags;
+ if (kp->arg == (const char *)&gts) {
+ if (gts)
+ wake_sleep_flags |= ACPI_EXECUTE_GTS;
+ else
+ wake_sleep_flags &= ~ACPI_EXECUTE_GTS;
+ }
+ if (kp->arg == (const char *)&bfs) {
+ if (bfs)
+ wake_sleep_flags |= ACPI_EXECUTE_BFS;
+ else
+ wake_sleep_flags &= ~ACPI_EXECUTE_BFS;
+ }
+ return ret;
}
+module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644);
+module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644);
+MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
+MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
static u8 sleep_states[ACPI_S_STATE_COUNT];
@@ -263,7 +273,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
{
acpi_status status = AE_OK;
u32 acpi_state = acpi_target_sleep_state;
- u8 flags = wake_sleep_flags();
int error;
ACPI_FLUSH_CPU_CACHE();
@@ -271,7 +280,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
switch (acpi_state) {
case ACPI_STATE_S1:
barrier();
- status = acpi_enter_sleep_state(acpi_state, flags);
+ status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags);
break;
case ACPI_STATE_S3:
@@ -286,7 +295,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
/* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(acpi_state, flags);
+ acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags);
/* ACPI 3.0 specs (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
@@ -550,30 +559,27 @@ static int acpi_hibernation_begin(void)
static int acpi_hibernation_enter(void)
{
- u8 flags = wake_sleep_flags();
acpi_status status = AE_OK;
ACPI_FLUSH_CPU_CACHE();
/* This shouldn't return. If it returns, we have a problem */
- status = acpi_enter_sleep_state(ACPI_STATE_S4, flags);
+ status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags);
/* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
static void acpi_hibernation_leave(void)
{
- u8 flags = wake_sleep_flags();
-
/*
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
* enable it here.
*/
acpi_enable();
/* Reprogram control registers and execute _BFS */
- acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags);
+ acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags);
/* Check the hardware signature */
if (facs && s4_hardware_signature != facs->hardware_signature) {
printk(KERN_EMERG "ACPI: Hardware changed while hibernated, "
@@ -828,12 +834,10 @@ static void acpi_power_off_prepare(void)
static void acpi_power_off(void)
{
- u8 flags = wake_sleep_flags();
-
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
printk(KERN_DEBUG "%s called\n", __func__);
local_irq_disable();
- acpi_enter_sleep_state(ACPI_STATE_S5, flags);
+ acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags);
}
/*
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 79a1e9d..ebaf67e 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -394,6 +394,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
{ PCI_DEVICE(0x1b4b, 0x9125),
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+ { PCI_DEVICE(0x1b4b, 0x917a),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(0x1b4b, 0x91a3),
.driver_data = board_ahci_yes_fbs },
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 0c86c77..9e419e1 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -280,6 +280,7 @@ static struct dev_pm_ops ahci_pm_ops = {
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "calxeda,hb-ahci", },
+ { .compatible = "snps,spear-ahci", },
{},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 28db50b..23763a1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -95,7 +95,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
-atomic_t ata_print_id = ATOMIC_INIT(1);
+atomic_t ata_print_id = ATOMIC_INIT(0);
struct ata_force_param {
const char *name;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c61316e..d1fbd59 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3501,7 +3501,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg
u64 now = get_jiffies_64();
int *trials = void_arg;
- if (ent->timestamp < now - min(now, interval))
+ if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
+ (ent->timestamp < now - min(now, interval)))
return -1;
(*trials)++;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 93dabdc..2222635 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3399,7 +3399,8 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/
shost->max_host_blocked = 1;
- rc = scsi_add_host(ap->scsi_host, &ap->tdev);
+ rc = scsi_add_host_with_dma(ap->scsi_host,
+ &ap->tdev, ap->host->dev);
if (rc)
goto err_add;
}
@@ -3838,18 +3839,25 @@ void ata_sas_port_stop(struct ata_port *ap)
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);
-int ata_sas_async_port_init(struct ata_port *ap)
+/**
+ * ata_sas_async_probe - simply schedule probing and return
+ * @ap: Port to probe
+ *
+ * For batch scheduling of probe for sas attached ata devices, assumes
+ * the port has already been through ata_sas_port_init()
+ */
+void ata_sas_async_probe(struct ata_port *ap)
{
- int rc = ap->ops->port_start(ap);
-
- if (!rc) {
- ap->print_id = atomic_inc_return(&ata_print_id);
- __ata_port_probe(ap);
- }
+ __ata_port_probe(ap);
+}
+EXPORT_SYMBOL_GPL(ata_sas_async_probe);
- return rc;
+int ata_sas_sync_probe(struct ata_port *ap)
+{
+ return ata_port_probe(ap);
}
-EXPORT_SYMBOL_GPL(ata_sas_async_port_init);
+EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
+
/**
* ata_sas_port_init - Initialize a SATA device
@@ -3866,12 +3874,10 @@ int ata_sas_port_init(struct ata_port *ap)
{
int rc = ap->ops->port_start(ap);
- if (!rc) {
- ap->print_id = atomic_inc_return(&ata_print_id);
- rc = ata_port_probe(ap);
- }
-
- return rc;
+ if (rc)
+ return rc;
+ ap->print_id = atomic_inc_return(&ata_print_id);
+ return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index fc2db2a..3239517 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -943,9 +943,9 @@ static int arasan_cf_resume(struct device *dev)
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
-#endif
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
@@ -953,9 +953,7 @@ static struct platform_driver arasan_cf_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &arasan_cf_pm_ops,
-#endif
},
};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7a3f535..bb80853 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -775,9 +775,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
map->format.parse_val(val + i);
} else {
for (i = 0; i < val_count; i++) {
- ret = regmap_read(map, reg + i, val + (i * val_bytes));
+ unsigned int ival;
+ ret = regmap_read(map, reg + i, &ival);
if (ret != 0)
return ret;
+ memcpy(val + (i * val_bytes), &ival, val_bytes);
}
}
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index cdcf75c..3e2a600 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -404,16 +404,19 @@ int bcma_sprom_get(struct bcma_bus *bus)
return -EOPNOTSUPP;
if (!bcma_sprom_ext_available(bus)) {
+ bool sprom_onchip;
+
/*
* External SPROM takes precedence so check
* on-chip OTP only when no external SPROM
* is present.
*/
- if (bcma_sprom_onchip_available(bus)) {
+ sprom_onchip = bcma_sprom_onchip_available(bus);
+ if (sprom_onchip) {
/* determine offset */
offset = bcma_sprom_onchip_offset(bus);
}
- if (!offset) {
+ if (!offset || !sprom_onchip) {
/*
* Maybe there is no SPROM on the device?
* Now we ask the arch code if there is some sprom
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index abfaaca..946166e 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
return;
}
- if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) {
+ if (!capable(CAP_SYS_ADMIN)) {
retcode = ERR_PERM;
goto fail;
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 89860f3..4f66171 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -416,7 +416,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info
"discard-secure", "%d",
blkif->vbd.discard_secure);
if (err) {
- dev_warn(dev-dev, "writing discard-secure (%d)", err);
+ dev_warn(&dev->dev, "writing discard-secure (%d)", err);
return;
}
}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index ae9edca..57fd867 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -75,6 +75,8 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x04CA, 0x3005) },
+ { USB_DEVICE(0x13d3, 0x3362) },
+ { USB_DEVICE(0x0CF3, 0xE004) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -94,6 +96,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */
};
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 3311b81..9217121 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -101,12 +101,16 @@ static struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) },
/* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0489, 0xe042) },
{ USB_DEVICE(0x0a5c, 0x21e3) },
{ USB_DEVICE(0x0a5c, 0x21e6) },
{ USB_DEVICE(0x0a5c, 0x21e8) },
{ USB_DEVICE(0x0a5c, 0x21f3) },
{ USB_DEVICE(0x413c, 0x8197) },
+ /* Foxconn - Hon Hai */
+ { USB_DEVICE(0x0489, 0xe033) },
+
{ } /* Terminating entry */
};
@@ -133,6 +137,8 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 0053d7e..8f3f74c 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
+#include <linux/module.h>
#include <crypto/ctr.h>
#include <crypto/des.h>
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dc641c7..921039e 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -124,6 +124,9 @@ struct talitos_private {
void __iomem *reg;
int irq[2];
+ /* SEC global registers lock */
+ spinlock_t reg_lock ____cacheline_aligned;
+
/* SEC version geometry (from device tree node) */
unsigned int num_channels;
unsigned int chfifo_len;
@@ -412,6 +415,7 @@ static void talitos_done_##name(unsigned long data) \
{ \
struct device *dev = (struct device *)data; \
struct talitos_private *priv = dev_get_drvdata(dev); \
+ unsigned long flags; \
\
if (ch_done_mask & 1) \
flush_channel(dev, 0, 0, 0); \
@@ -427,8 +431,10 @@ static void talitos_done_##name(unsigned long data) \
out: \
/* At this point, all completed channels have been processed */ \
/* Unmask done interrupts for channels completed later on. */ \
+ spin_lock_irqsave(&priv->reg_lock, flags); \
setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
+ spin_unlock_irqrestore(&priv->reg_lock, flags); \
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
@@ -619,22 +625,28 @@ static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
struct device *dev = data; \
struct talitos_private *priv = dev_get_drvdata(dev); \
u32 isr, isr_lo; \
+ unsigned long flags; \
\
+ spin_lock_irqsave(&priv->reg_lock, flags); \
isr = in_be32(priv->reg + TALITOS_ISR); \
isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
/* Acknowledge interrupt */ \
out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
\
- if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo)) \
- talitos_error(dev, isr, isr_lo); \
- else \
+ if (unlikely(isr & ch_err_mask || isr_lo)) { \
+ spin_unlock_irqrestore(&priv->reg_lock, flags); \
+ talitos_error(dev, isr & ch_err_mask, isr_lo); \
+ } \
+ else { \
if (likely(isr & ch_done_mask)) { \
/* mask further done interrupts. */ \
clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
/* done_task will unmask done interrupts at exit */ \
tasklet_schedule(&priv->done_task[tlet]); \
} \
+ spin_unlock_irqrestore(&priv->reg_lock, flags); \
+ } \
\
return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
IRQ_NONE; \
@@ -2719,6 +2731,8 @@ static int talitos_probe(struct platform_device *ofdev)
priv->ofdev = ofdev;
+ spin_lock_init(&priv->reg_lock);
+
err = talitos_probe_irq(ofdev);
if (err)
goto err_out;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index cf9da36..ef378b5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -91,11 +91,10 @@ config DW_DMAC
config AT_HDMAC
tristate "Atmel AHB DMA support"
- depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
+ depends on ARCH_AT91
select DMA_ENGINE
help
- Support the Atmel AHB DMA controller. This can be integrated in
- chips such as the Atmel AT91SAM9RL.
+ Support the Atmel AHB DMA controller.
config FSL_DMA
tristate "Freescale Elo and Elo Plus DMA support"
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index c301a8e..3d704ab 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1429,6 +1429,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* signal
*/
release_phy_channel(plchan);
+ plchan->phychan_hold = 0;
}
/* Dequeue jobs and free LLIs */
if (plchan->at) {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7aa58d2..445fdf8 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -221,10 +221,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
vdbg_dump_regs(atchan);
- /* clear any pending interrupt */
- while (dma_readl(atdma, EBCISR))
- cpu_relax();
-
channel_writel(atchan, SADDR, 0);
channel_writel(atchan, DADDR, 0);
channel_writel(atchan, CTRLA, 0);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index a45b5d2..bb787d8 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -571,11 +571,14 @@ static void imxdma_tasklet(unsigned long data)
if (desc->desc.callback)
desc->desc.callback(desc->desc.callback_param);
- dma_cookie_complete(&desc->desc);
-
- /* If we are dealing with a cyclic descriptor keep it on ld_active */
+ /* If we are dealing with a cyclic descriptor keep it on ld_active
+ * and dont mark the descripor as complete.
+ * Only in non-cyclic cases it would be marked as complete
+ */
if (imxdma_chan_is_doing_cyclic(imxdmac))
goto out;
+ else
+ dma_cookie_complete(&desc->desc);
/* Free 2D slot if it was an interleaved transfer */
if (imxdmac->enabled_2d) {
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index c81ef7e..655d4ce6 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -201,10 +201,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
-
- mxs_dma_enable_chan(mxs_chan);
-
return dma_cookie_assign(tx);
}
@@ -558,9 +554,9 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
static void mxs_dma_issue_pending(struct dma_chan *chan)
{
- /*
- * Nothing to do. We only have a single descriptor.
- */
+ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+
+ mxs_dma_enable_chan(mxs_chan);
}
static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 282caf1..2ee6e23 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2225,12 +2225,9 @@ static inline void free_desc_list(struct list_head *list)
{
struct dma_pl330_dmac *pdmac;
struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch;
+ struct dma_pl330_chan *pch = NULL;
unsigned long flags;
- if (list_empty(list))
- return;
-
/* Finish off the work list */
list_for_each_entry(desc, list, node) {
dma_async_tx_callback callback;
@@ -2247,6 +2244,10 @@ static inline void free_desc_list(struct list_head *list)
desc->pchan = NULL;
}
+ /* pch will be unset if list was empty */
+ if (!pch)
+ return;
+
pdmac = pch->dmac;
spin_lock_irqsave(&pdmac->pool_lock, flags);
@@ -2257,12 +2258,9 @@ static inline void free_desc_list(struct list_head *list)
static inline void handle_cyclic_desc_list(struct list_head *list)
{
struct dma_pl330_desc *desc;
- struct dma_pl330_chan *pch;
+ struct dma_pl330_chan *pch = NULL;
unsigned long flags;
- if (list_empty(list))
- return;
-
list_for_each_entry(desc, list, node) {
dma_async_tx_callback callback;
@@ -2274,6 +2272,10 @@ static inline void handle_cyclic_desc_list(struct list_head *list)
callback(desc->txd.callback_param);
}
+ /* pch will be unset if list was empty */
+ if (!pch)
+ return;
+
spin_lock_irqsave(&pch->lock, flags);
list_splice_tail_init(list, &pch->work_list);
spin_unlock_irqrestore(&pch->lock, flags);
@@ -2926,8 +2928,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
- (u8)pi->pcfg.num_chan);
+ if (pdat)
+ num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
+ else
+ num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
for (i = 0; i < num_chan; i++) {
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index bdd41d4..2ed1ac3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -18,6 +18,7 @@
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
+#include <linux/regulator/consumer.h>
#include <plat/ste_dma40.h>
@@ -69,6 +70,22 @@ enum d40_command {
};
/*
+ * enum d40_events - The different Event Enables for the event lines.
+ *
+ * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
+ * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
+ * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line.
+ * @D40_ROUND_EVENTLINE: Status check for event line.
+ */
+
+enum d40_events {
+ D40_DEACTIVATE_EVENTLINE = 0,
+ D40_ACTIVATE_EVENTLINE = 1,
+ D40_SUSPEND_REQ_EVENTLINE = 2,
+ D40_ROUND_EVENTLINE = 3
+};
+
+/*
* These are the registers that has to be saved and later restored
* when the DMA hw is powered off.
* TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
@@ -870,8 +887,8 @@ static void d40_save_restore_registers(struct d40_base *base, bool save)
}
#endif
-static int d40_channel_execute_command(struct d40_chan *d40c,
- enum d40_command command)
+static int __d40_execute_command_phy(struct d40_chan *d40c,
+ enum d40_command command)
{
u32 status;
int i;
@@ -880,6 +897,12 @@ static int d40_channel_execute_command(struct d40_chan *d40c,
unsigned long flags;
u32 wmask;
+ if (command == D40_DMA_STOP) {
+ ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
+ if (ret)
+ return ret;
+ }
+
spin_lock_irqsave(&d40c->base->execmd_lock, flags);
if (d40c->phy_chan->num % 2 == 0)
@@ -973,67 +996,109 @@ static void d40_term_all(struct d40_chan *d40c)
}
d40c->pending_tx = 0;
- d40c->busy = false;
}
-static void __d40_config_set_event(struct d40_chan *d40c, bool enable,
- u32 event, int reg)
+static void __d40_config_set_event(struct d40_chan *d40c,
+ enum d40_events event_type, u32 event,
+ int reg)
{
void __iomem *addr = chan_base(d40c) + reg;
int tries;
+ u32 status;
+
+ switch (event_type) {
+
+ case D40_DEACTIVATE_EVENTLINE:
- if (!enable) {
writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
| ~D40_EVENTLINE_MASK(event), addr);
- return;
- }
+ break;
+
+ case D40_SUSPEND_REQ_EVENTLINE:
+ status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+ D40_EVENTLINE_POS(event);
+
+ if (status == D40_DEACTIVATE_EVENTLINE ||
+ status == D40_SUSPEND_REQ_EVENTLINE)
+ break;
+ writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
+ | ~D40_EVENTLINE_MASK(event), addr);
+
+ for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {
+
+ status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
+ D40_EVENTLINE_POS(event);
+
+ cpu_relax();
+ /*
+ * Reduce the number of bus accesses while
+ * waiting for the DMA to suspend.
+ */
+ udelay(3);
+
+ if (status == D40_DEACTIVATE_EVENTLINE)
+ break;
+ }
+
+ if (tries == D40_SUSPEND_MAX_IT) {
+ chan_err(d40c,
+ "unable to stop the event_line chl %d (log: %d)"
+ "status %x\n", d40c->phy_chan->num,
+ d40c->log_num, status);
+ }
+ break;
+
+ case D40_ACTIVATE_EVENTLINE:
/*
* The hardware sometimes doesn't register the enable when src and dst
* event lines are active on the same logical channel. Retry to ensure
* it does. Usually only one retry is sufficient.
*/
- tries = 100;
- while (--tries) {
- writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
- | ~D40_EVENTLINE_MASK(event), addr);
+ tries = 100;
+ while (--tries) {
+ writel((D40_ACTIVATE_EVENTLINE <<
+ D40_EVENTLINE_POS(event)) |
+ ~D40_EVENTLINE_MASK(event), addr);
- if (readl(addr) & D40_EVENTLINE_MASK(event))
- break;
- }
+ if (readl(addr) & D40_EVENTLINE_MASK(event))
+ break;
+ }
- if (tries != 99)
- dev_dbg(chan2dev(d40c),
- "[%s] workaround enable S%cLNK (%d tries)\n",
- __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
- 100 - tries);
+ if (tries != 99)
+ dev_dbg(chan2dev(d40c),
+ "[%s] workaround enable S%cLNK (%d tries)\n",
+ __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
+ 100 - tries);
- WARN_ON(!tries);
-}
+ WARN_ON(!tries);
+ break;
-static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
-{
- unsigned long flags;
+ case D40_ROUND_EVENTLINE:
+ BUG();
+ break;
- spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+ }
+}
+static void d40_config_set_event(struct d40_chan *d40c,
+ enum d40_events event_type)
+{
/* Enable event line connected to device (or memcpy) */
if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
- __d40_config_set_event(d40c, do_enable, event,
+ __d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SSLNK);
}
if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
- __d40_config_set_event(d40c, do_enable, event,
+ __d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SDLNK);
}
-
- spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
}
static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1047,6 +1112,64 @@ static u32 d40_chan_has_events(struct d40_chan *d40c)
return val;
}
+static int
+__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 active_status;
+ void __iomem *active_reg;
+
+ if (d40c->phy_chan->num % 2 == 0)
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
+ else
+ active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
+
+
+ spin_lock_irqsave(&d40c->phy_chan->lock, flags);
+
+ switch (command) {
+ case D40_DMA_STOP:
+ case D40_DMA_SUSPEND_REQ:
+
+ active_status = (readl(active_reg) &
+ D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
+ D40_CHAN_POS(d40c->phy_chan->num);
+
+ if (active_status == D40_DMA_RUN)
+ d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
+ else
+ d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);
+
+ if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
+ ret = __d40_execute_command_phy(d40c, command);
+
+ break;
+
+ case D40_DMA_RUN:
+
+ d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
+ ret = __d40_execute_command_phy(d40c, command);
+ break;
+
+ case D40_DMA_SUSPENDED:
+ BUG();
+ break;
+ }
+
+ spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
+ return ret;
+}
+
+static int d40_channel_execute_command(struct d40_chan *d40c,
+ enum d40_command command)
+{
+ if (chan_is_logical(d40c))
+ return __d40_execute_command_log(d40c, command);
+ else
+ return __d40_execute_command_phy(d40c, command);
+}
+
static u32 d40_get_prmo(struct d40_chan *d40c)
{
static const unsigned int phy_map[] = {
@@ -1149,15 +1272,7 @@ static int d40_pause(struct d40_chan *d40c)
spin_lock_irqsave(&d40c->lock, flags);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
- if (res == 0) {
- if (chan_is_logical(d40c)) {
- d40_config_set_event(d40c, false);
- /* Resume the other logical channels if any */
- if (d40_chan_has_events(d40c))
- res = d40_channel_execute_command(d40c,
- D40_DMA_RUN);
- }
- }
+
pm_runtime_mark_last_busy(d40c->base->dev);
pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1174,45 +1289,17 @@ static int d40_resume(struct d40_chan *d40c)
spin_lock_irqsave(&d40c->lock, flags);
pm_runtime_get_sync(d40c->base->dev);
- if (d40c->base->rev == 0)
- if (chan_is_logical(d40c)) {
- res = d40_channel_execute_command(d40c,
- D40_DMA_SUSPEND_REQ);
- goto no_suspend;
- }
/* If bytes left to transfer or linked tx resume job */
- if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
-
- if (chan_is_logical(d40c))
- d40_config_set_event(d40c, true);
-
+ if (d40_residue(d40c) || d40_tx_is_linked(d40c))
res = d40_channel_execute_command(d40c, D40_DMA_RUN);
- }
-no_suspend:
pm_runtime_mark_last_busy(d40c->base->dev);
pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
-static int d40_terminate_all(struct d40_chan *chan)
-{
- unsigned long flags;
- int ret = 0;
-
- ret = d40_pause(chan);
- if (!ret && chan_is_physical(chan))
- ret = d40_channel_execute_command(chan, D40_DMA_STOP);
-
- spin_lock_irqsave(&chan->lock, flags);
- d40_term_all(chan);
- spin_unlock_irqrestore(&chan->lock, flags);
-
- return ret;
-}
-
static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct d40_chan *d40c = container_of(tx->chan,
@@ -1232,20 +1319,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
static int d40_start(struct d40_chan *d40c)
{
- if (d40c->base->rev == 0) {
- int err;
-
- if (chan_is_logical(d40c)) {
- err = d40_channel_execute_command(d40c,
- D40_DMA_SUSPEND_REQ);
- if (err)
- return err;
- }
- }
-
- if (chan_is_logical(d40c))
- d40_config_set_event(d40c, true);
-
return d40_channel_execute_command(d40c, D40_DMA_RUN);
}
@@ -1258,10 +1331,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
d40d = d40_first_queued(d40c);
if (d40d != NULL) {
- if (!d40c->busy)
+ if (!d40c->busy) {
d40c->busy = true;
-
- pm_runtime_get_sync(d40c->base->dev);
+ pm_runtime_get_sync(d40c->base->dev);
+ }
/* Remove from queue */
d40_desc_remove(d40d);
@@ -1388,8 +1461,8 @@ static void dma_tasklet(unsigned long data)
return;
- err:
- /* Rescue manoeuvre if receiving double interrupts */
+err:
+ /* Rescue manouver if receiving double interrupts */
if (d40c->pending_tx > 0)
d40c->pending_tx--;
spin_unlock_irqrestore(&d40c->lock, flags);
@@ -1770,7 +1843,6 @@ static int d40_config_memcpy(struct d40_chan *d40c)
return 0;
}
-
static int d40_free_dma(struct d40_chan *d40c)
{
@@ -1806,43 +1878,18 @@ static int d40_free_dma(struct d40_chan *d40c)
}
pm_runtime_get_sync(d40c->base->dev);
- res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
+ res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
- chan_err(d40c, "suspend failed\n");
+ chan_err(d40c, "stop failed\n");
goto out;
}
- if (chan_is_logical(d40c)) {
- /* Release logical channel, deactivate the event line */
+ d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
- d40_config_set_event(d40c, false);
+ if (chan_is_logical(d40c))
d40c->base->lookup_log_chans[d40c->log_num] = NULL;
-
- /*
- * Check if there are more logical allocation
- * on this phy channel.
- */
- if (!d40_alloc_mask_free(phy, is_src, event)) {
- /* Resume the other logical channels if any */
- if (d40_chan_has_events(d40c)) {
- res = d40_channel_execute_command(d40c,
- D40_DMA_RUN);
- if (res)
- chan_err(d40c,
- "Executing RUN command\n");
- }
- goto out;
- }
- } else {
- (void) d40_alloc_mask_free(phy, is_src, 0);
- }
-
- /* Release physical channel */
- res = d40_channel_execute_command(d40c, D40_DMA_STOP);
- if (res) {
- chan_err(d40c, "Failed to stop channel\n");
- goto out;
- }
+ else
+ d40c->base->lookup_phy_chans[phy->num] = NULL;
if (d40c->busy) {
pm_runtime_mark_last_busy(d40c->base->dev);
@@ -1852,7 +1899,6 @@ static int d40_free_dma(struct d40_chan *d40c)
d40c->busy = false;
d40c->phy_chan = NULL;
d40c->configured = false;
- d40c->base->lookup_phy_chans[phy->num] = NULL;
out:
pm_runtime_mark_last_busy(d40c->base->dev);
@@ -2070,7 +2116,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (sg_next(&sg_src[sg_len - 1]) == sg_src)
desc->cyclic = true;
- if (direction != DMA_NONE) {
+ if (direction != DMA_TRANS_NONE) {
dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
if (direction == DMA_DEV_TO_MEM)
@@ -2371,6 +2417,31 @@ static void d40_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&d40c->lock, flags);
}
+static void d40_terminate_all(struct dma_chan *chan)
+{
+ unsigned long flags;
+ struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
+ int ret;
+
+ spin_lock_irqsave(&d40c->lock, flags);
+
+ pm_runtime_get_sync(d40c->base->dev);
+ ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
+ if (ret)
+ chan_err(d40c, "Failed to stop channel\n");
+
+ d40_term_all(d40c);
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ if (d40c->busy) {
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
+ d40c->busy = false;
+
+ spin_unlock_irqrestore(&d40c->lock, flags);
+}
+
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
struct stedma40_half_channel_info *info,
@@ -2551,7 +2622,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
- return d40_terminate_all(d40c);
+ d40_terminate_all(chan);
+ return 0;
case DMA_PAUSE:
return d40_pause(d40c);
case DMA_RESUME:
@@ -2908,6 +2980,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
rev, res->start);
+ if (rev < 2) {
+ d40_err(&pdev->dev, "hardware revision: %d is not supported",
+ rev);
+ goto failure;
+ }
+
plat_data = pdev->dev.platform_data;
/* Count the number of logical channels in use */
@@ -2998,6 +3076,7 @@ failure:
if (base) {
kfree(base->lcla_pool.alloc_map);
+ kfree(base->reg_val_backup_chan);
kfree(base->lookup_log_chans);
kfree(base->lookup_phy_chans);
kfree(base->phy_res);
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 8d3d490..51e8e53 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -62,8 +62,6 @@
#define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS)
/* Link register */
-#define D40_DEACTIVATE_EVENTLINE 0x0
-#define D40_ACTIVATE_EVENTLINE 0x1
#define D40_EVENTLINE_POS(i) (2 * i)
#define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i))
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index d25599f..47408e8 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -191,6 +191,190 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
}
}
+static bool
+validate_device_path(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ struct efi_generic_dev_path *node;
+ int offset = 0;
+
+ node = (struct efi_generic_dev_path *)buffer;
+
+ if (len < sizeof(*node))
+ return false;
+
+ while (offset <= len - sizeof(*node) &&
+ node->length >= sizeof(*node) &&
+ node->length <= len - offset) {
+ offset += node->length;
+
+ if ((node->type == EFI_DEV_END_PATH ||
+ node->type == EFI_DEV_END_PATH2) &&
+ node->sub_type == EFI_DEV_END_ENTIRE)
+ return true;
+
+ node = (struct efi_generic_dev_path *)(buffer + offset);
+ }
+
+ /*
+ * If we're here then either node->length pointed past the end
+ * of the buffer or we reached the end of the buffer without
+ * finding a device path end node.
+ */
+ return false;
+}
+
+static bool
+validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ /* An array of 16-bit integers */
+ if ((len % 2) != 0)
+ return false;
+
+ return true;
+}
+
+static bool
+validate_load_option(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ u16 filepathlength;
+ int i, desclength = 0, namelen;
+
+ namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
+
+ /* Either "Boot" or "Driver" followed by four digits of hex */
+ for (i = match; i < match+4; i++) {
+ if (var->VariableName[i] > 127 ||
+ hex_to_bin(var->VariableName[i] & 0xff) < 0)
+ return true;
+ }
+
+ /* Reject it if there's 4 digits of hex and then further content */
+ if (namelen > match + 4)
+ return false;
+
+ /* A valid entry must be at least 8 bytes */
+ if (len < 8)
+ return false;
+
+ filepathlength = buffer[4] | buffer[5] << 8;
+
+ /*
+ * There's no stored length for the description, so it has to be
+ * found by hand
+ */
+ desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+
+ /* Each boot entry must have a descriptor */
+ if (!desclength)
+ return false;
+
+ /*
+ * If the sum of the length of the description, the claimed filepath
+ * length and the original header are greater than the length of the
+ * variable, it's malformed
+ */
+ if ((desclength + filepathlength + 6) > len)
+ return false;
+
+ /*
+ * And, finally, check the filepath
+ */
+ return validate_device_path(var, match, buffer + desclength + 6,
+ filepathlength);
+}
+
+static bool
+validate_uint16(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ /* A single 16-bit integer */
+ if (len != 2)
+ return false;
+
+ return true;
+}
+
+static bool
+validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
+ unsigned long len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (buffer[i] > 127)
+ return false;
+
+ if (buffer[i] == 0)
+ return true;
+ }
+
+ return false;
+}
+
+struct variable_validate {
+ char *name;
+ bool (*validate)(struct efi_variable *var, int match, u8 *data,
+ unsigned long len);
+};
+
+static const struct variable_validate variable_validate[] = {
+ { "BootNext", validate_uint16 },
+ { "BootOrder", validate_boot_order },
+ { "DriverOrder", validate_boot_order },
+ { "Boot*", validate_load_option },
+ { "Driver*", validate_load_option },
+ { "ConIn", validate_device_path },
+ { "ConInDev", validate_device_path },
+ { "ConOut", validate_device_path },
+ { "ConOutDev", validate_device_path },
+ { "ErrOut", validate_device_path },
+ { "ErrOutDev", validate_device_path },
+ { "Timeout", validate_uint16 },
+ { "Lang", validate_ascii_string },
+ { "PlatformLang", validate_ascii_string },
+ { "", NULL },
+};
+
+static bool
+validate_var(struct efi_variable *var, u8 *data, unsigned long len)
+{
+ int i;
+ u16 *unicode_name = var->VariableName;
+
+ for (i = 0; variable_validate[i].validate != NULL; i++) {
+ const char *name = variable_validate[i].name;
+ int match;
+
+ for (match = 0; ; match++) {
+ char c = name[match];
+ u16 u = unicode_name[match];
+
+ /* All special variables are plain ascii */
+ if (u > 127)
+ return true;
+
+ /* Wildcard in the matching name means we've matched */
+ if (c == '*')
+ return variable_validate[i].validate(var,
+ match, data, len);
+
+ /* Case sensitive match */
+ if (c != u)
+ break;
+
+ /* Reached the end of the string while matching */
+ if (!c)
+ return variable_validate[i].validate(var,
+ match, data, len);
+ }
+ }
+
+ return true;
+}
+
static efi_status_t
get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
{
@@ -324,6 +508,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
return -EINVAL;
}
+ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+ validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
+ printk(KERN_ERR "efivars: Malformed variable content\n");
+ return -EINVAL;
+ }
+
spin_lock(&efivars->lock);
status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid,
@@ -626,6 +816,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
+ validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
+ printk(KERN_ERR "efivars: Malformed variable content\n");
+ return -EINVAL;
+ }
+
spin_lock(&efivars->lock);
/*
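
For reference, the new validate_var() path above matches variable names against a table in which a trailing '*' acts as a wildcard, and validate_load_option() then requires the wildcarded tail to be exactly four hex digits followed by nothing else. A minimal stand-alone sketch of that name check, with invented helper names and test data (not part of the patch):

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Check that name is the given prefix followed by exactly four hex digits,
 * the same shape validate_load_option() expects for "Boot####"/"Driver####". */
static bool is_load_option_name(const char *name, const char *prefix)
{
	size_t plen = strlen(prefix);

	if (strncmp(name, prefix, plen) != 0)
		return false;
	for (size_t i = 0; i < 4; i++)
		if (!isxdigit((unsigned char)name[plen + i]))
			return false;
	return name[plen + 4] == '\0';	/* nothing may follow the digits */
}

int main(void)
{
	const char *names[] = { "Boot0001", "BootOrder", "Boot00GZ", "Driver00FF" };

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("%-10s -> %s\n", names[i],
		       is_load_option_name(names[i], "Boot") ||
		       is_load_option_name(names[i], "Driver") ?
		       "load option" : "not a load option");
	return 0;
}
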
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 1adc2ec..4461540 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -965,18 +965,15 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
}
_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
- _gpio_rmw(base, bank->regs->irqstatus, l,
- bank->regs->irqenable_inv == false);
- _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
- _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
+ _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
if (bank->regs->debounce_en)
- _gpio_rmw(base, bank->regs->debounce_en, 0, 1);
+ __raw_writel(0, base + bank->regs->debounce_en);
/* Save OE default value (0xffffffff) in the context */
bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
/* Initialize interface clk ungated, module enabled */
if (bank->regs->ctrl)
- _gpio_rmw(base, bank->regs->ctrl, 0, 1);
+ __raw_writel(0, base + bank->regs->ctrl);
}
static __devinit void
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index e8729cc..2cd958e 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -230,16 +230,12 @@ static void pch_gpio_setup(struct pch_gpio *chip)
static int pch_irq_type(struct irq_data *d, unsigned int type)
{
- u32 im;
- u32 __iomem *im_reg;
- u32 ien;
- u32 im_pos;
- int ch;
- unsigned long flags;
- u32 val;
- int irq = d->irq;
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pch_gpio *chip = gc->private;
+ u32 im, im_pos, val;
+ u32 __iomem *im_reg;
+ unsigned long flags;
+ int ch, irq = d->irq;
ch = irq - chip->irq_base;
if (irq <= chip->irq_base + 7) {
@@ -270,30 +266,22 @@ static int pch_irq_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_LEVEL_LOW:
val = PCH_LEVEL_L;
break;
- case IRQ_TYPE_PROBE:
- goto end;
default:
- dev_warn(chip->dev, "%s: unknown type(%dd)",
- __func__, type);
- goto end;
+ goto unlock;
}
/* Set interrupt mode */
im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4));
iowrite32(im | (val << (im_pos * 4)), im_reg);
- /* iclr */
- iowrite32(BIT(ch), &chip->reg->iclr);
+ /* And the handler */
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ __irq_set_handler_locked(d->irq, handle_level_irq);
+ else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
- /* IMASKCLR */
- iowrite32(BIT(ch), &chip->reg->imaskclr);
-
- /* Enable interrupt */
- ien = ioread32(&chip->reg->ien);
- iowrite32(ien | BIT(ch), &chip->reg->ien);
-end:
+unlock:
spin_unlock_irqrestore(&chip->spinlock, flags);
-
return 0;
}
@@ -313,18 +301,24 @@ static void pch_irq_mask(struct irq_data *d)
iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask);
}
+static void pch_irq_ack(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct pch_gpio *chip = gc->private;
+
+ iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr);
+}
+
static irqreturn_t pch_gpio_handler(int irq, void *dev_id)
{
struct pch_gpio *chip = dev_id;
u32 reg_val = ioread32(&chip->reg->istatus);
- int i;
- int ret = IRQ_NONE;
+ int i, ret = IRQ_NONE;
for (i = 0; i < gpio_pins[chip->ioh]; i++) {
if (reg_val & BIT(i)) {
dev_dbg(chip->dev, "%s:[%d]:irq=%d status=0x%x\n",
__func__, i, irq, reg_val);
- iowrite32(BIT(i), &chip->reg->iclr);
generic_handle_irq(chip->irq_base + i);
ret = IRQ_HANDLED;
}
@@ -343,6 +337,7 @@ static __devinit void pch_gpio_alloc_generic_chip(struct pch_gpio *chip,
gc->private = chip;
ct = gc->chip_types;
+ ct->chip.irq_ack = pch_irq_ack;
ct->chip.irq_mask = pch_irq_mask;
ct->chip.irq_unmask = pch_irq_unmask;
ct->chip.irq_set_type = pch_irq_type;
@@ -357,6 +352,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
s32 ret;
struct pch_gpio *chip;
int irq_base;
+ u32 msk;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -408,8 +404,13 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
}
chip->irq_base = irq_base;
+ /* Mask all interrupts, but enable them */
+ msk = (1 << gpio_pins[chip->ioh]) - 1;
+ iowrite32(msk, &chip->reg->imask);
+ iowrite32(msk, &chip->reg->ien);
+
ret = request_irq(pdev->irq, pch_gpio_handler,
- IRQF_SHARED, KBUILD_MODNAME, chip);
+ IRQF_SHARED, KBUILD_MODNAME, chip);
if (ret != 0) {
dev_err(&pdev->dev,
"%s request_irq failed\n", __func__);
@@ -418,8 +419,6 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
- /* Initialize interrupt ien register */
- iowrite32(0, &chip->reg->ien);
end:
return 0;
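
The gpio-pch change above moves interrupt acknowledgement into an irq_ack callback and, at probe time, masks every pin before requesting the shared IRQ; the handler then walks the status word bit by bit. A tiny stand-alone sketch of the all-pins mask and that bit-walk, with invented values:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int npins = 12;
	unsigned int mask_all = (1u << npins) - 1;	/* every pin masked */
	unsigned int istatus = 0x25;			/* pretend pins 0, 2 and 5 fired */

	printf("initial imask/ien value: 0x%08x\n", mask_all);

	/* Walk the latched status the way pch_gpio_handler() does. */
	for (unsigned int i = 0; i < npins; i++)
		if (istatus & BIT(i))
			printf("dispatching virtual irq for pin %u\n", i);
	return 0;
}
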
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 5689ce6..fc3ace3 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -64,6 +64,7 @@ struct pxa_gpio_chip {
unsigned long irq_mask;
unsigned long irq_edge_rise;
unsigned long irq_edge_fall;
+ int (*set_wake)(unsigned int gpio, unsigned int on);
#ifdef CONFIG_PM
unsigned long saved_gplr;
@@ -269,7 +270,8 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
(value ? GPSR_OFFSET : GPCR_OFFSET));
}
-static int __devinit pxa_init_gpio_chip(int gpio_end)
+static int __devinit pxa_init_gpio_chip(int gpio_end,
+ int (*set_wake)(unsigned int, unsigned int))
{
int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
struct pxa_gpio_chip *chips;
@@ -285,6 +287,7 @@ static int __devinit pxa_init_gpio_chip(int gpio_end)
sprintf(chips[i].label, "gpio-%d", i);
chips[i].regbase = gpio_reg_base + BANK_OFF(i);
+ chips[i].set_wake = set_wake;
c->base = gpio;
c->label = chips[i].label;
@@ -412,6 +415,17 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}
+static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on)
+{
+ int gpio = pxa_irq_to_gpio(d->irq);
+ struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
+
+ if (c->set_wake)
+ return c->set_wake(gpio, on);
+ else
+ return 0;
+}
+
static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
int gpio = pxa_irq_to_gpio(d->irq);
@@ -427,6 +441,7 @@ static struct irq_chip pxa_muxed_gpio_chip = {
.irq_mask = pxa_mask_muxed_gpio,
.irq_unmask = pxa_unmask_muxed_gpio,
.irq_set_type = pxa_gpio_irq_type,
+ .irq_set_wake = pxa_gpio_set_wake,
};
static int pxa_gpio_nums(void)
@@ -471,6 +486,7 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev)
struct pxa_gpio_chip *c;
struct resource *res;
struct clk *clk;
+ struct pxa_gpio_platform_data *info;
int gpio, irq, ret;
int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
@@ -516,7 +532,8 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev)
}
/* Initialize GPIO chips */
- pxa_init_gpio_chip(pxa_last_gpio);
+ info = dev_get_platdata(&pdev->dev);
+ pxa_init_gpio_chip(pxa_last_gpio, info ? info->gpio_set_wake : NULL);
/* clear all GPIO edge detects */
for_each_gpio_chip(gpio, c) {
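
The gpio-pxa change above threads an optional set_wake() hook from platform data down to the irq_chip and only calls it when present. The guard-then-delegate shape, reduced to a stand-alone sketch with invented names:

#include <stdio.h>

struct fake_gpio_chip {
	/* optional platform hook; may legitimately be NULL */
	int (*set_wake)(unsigned int gpio, unsigned int on);
};

static int board_set_wake(unsigned int gpio, unsigned int on)
{
	printf("board: gpio %u wake %s\n", gpio, on ? "enabled" : "disabled");
	return 0;
}

static int chip_irq_set_wake(struct fake_gpio_chip *c, unsigned int gpio,
			     unsigned int on)
{
	/* Same shape as pxa_gpio_set_wake(): delegate if a hook exists,
	 * otherwise report success so wake configuration still succeeds. */
	if (c->set_wake)
		return c->set_wake(gpio, on);
	return 0;
}

int main(void)
{
	struct fake_gpio_chip with_hook = { .set_wake = board_set_wake };
	struct fake_gpio_chip without_hook = { 0 };

	chip_irq_set_wake(&with_hook, 4, 1);
	chip_irq_set_wake(&without_hook, 4, 1);
	return 0;
}
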
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 19d6fc0..e991d91 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -452,12 +452,14 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
};
#endif
+#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
static struct samsung_gpio_cfg exynos_gpio_cfg = {
.set_pull = exynos_gpio_setpull,
.get_pull = exynos_gpio_getpull,
.set_config = samsung_gpio_setcfg_4bit,
.get_config = samsung_gpio_getcfg_4bit,
};
+#endif
#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450)
static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {
@@ -2123,8 +2125,8 @@ static struct samsung_gpio_chip s5pv210_gpios_4bit[] = {
* uses the above macro and depends on the banks being listed in order here.
*/
-static struct samsung_gpio_chip exynos4_gpios_1[] = {
#ifdef CONFIG_ARCH_EXYNOS4
+static struct samsung_gpio_chip exynos4_gpios_1[] = {
{
.chip = {
.base = EXYNOS4_GPA0(0),
@@ -2222,11 +2224,11 @@ static struct samsung_gpio_chip exynos4_gpios_1[] = {
.label = "GPF3",
},
},
-#endif
};
+#endif
-static struct samsung_gpio_chip exynos4_gpios_2[] = {
#ifdef CONFIG_ARCH_EXYNOS4
+static struct samsung_gpio_chip exynos4_gpios_2[] = {
{
.chip = {
.base = EXYNOS4_GPJ0(0),
@@ -2367,11 +2369,11 @@ static struct samsung_gpio_chip exynos4_gpios_2[] = {
.to_irq = samsung_gpiolib_to_irq,
},
},
-#endif
};
+#endif
-static struct samsung_gpio_chip exynos4_gpios_3[] = {
#ifdef CONFIG_ARCH_EXYNOS4
+static struct samsung_gpio_chip exynos4_gpios_3[] = {
{
.chip = {
.base = EXYNOS4_GPZ(0),
@@ -2379,8 +2381,8 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
.label = "GPZ",
},
},
-#endif
};
+#endif
#ifdef CONFIG_ARCH_EXYNOS5
static struct samsung_gpio_chip exynos5_gpios_1[] = {
@@ -2719,7 +2721,9 @@ static __init int samsung_gpiolib_init(void)
{
struct samsung_gpio_chip *chip;
int i, nr_chips;
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4;
+#endif
int group = 0;
samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs));
@@ -2971,6 +2975,7 @@ static __init int samsung_gpiolib_init(void)
return 0;
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250)
err_ioremap4:
iounmap(gpio_base3);
err_ioremap3:
@@ -2979,6 +2984,7 @@ err_ioremap2:
iounmap(gpio_base1);
err_ioremap1:
return -ENOMEM;
+#endif
}
core_initcall(samsung_gpiolib_init);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 30372f7..348b367 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1510,8 +1510,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
* \param arg pointer to a drm_buf_map structure.
* \return zero on success or a negative number on failure.
*
- * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
- * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls

* drm_mmap_dma().
*/
@@ -1553,18 +1553,14 @@ int drm_mapbufs(struct drm_device *dev, void *data,
retcode = -EINVAL;
goto done;
}
- down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, map->size,
+ virtual = vm_mmap(file_priv->filp, 0, map->size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
token);
- up_write(&current->mm->mmap_sem);
} else {
- down_write(&current->mm->mmap_sem);
- virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
+ virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
PROT_READ | PROT_WRITE,
MAP_SHARED, 0);
- up_write(&current->mm->mmap_sem);
}
if (virtual > -1024UL) {
/* Real error */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 26d5197..1dffa83 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -149,22 +149,12 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
unsigned long pfn;
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- unsigned long usize = buf->size;
-
if (!buf->pages)
return -EINTR;
- while (usize > 0) {
- pfn = page_to_pfn(buf->pages[page_offset++]);
- vm_insert_mixed(vma, f_vaddr, pfn);
- f_vaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- }
-
- return 0;
- }
-
- pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
+ pfn = page_to_pfn(buf->pages[page_offset++]);
+ } else
+ pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
return vm_insert_mixed(vma, f_vaddr, pfn);
}
@@ -524,6 +514,8 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (!buffer->pages)
return -EINVAL;
+ vma->vm_flags |= VM_MIXEDMAP;
+
do {
ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
if (ret) {
@@ -581,10 +573,8 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
obj->filp->f_op = &exynos_drm_gem_fops;
obj->filp->private_data = obj;
- down_write(&current->mm->mmap_sem);
- addr = do_mmap(obj->filp, 0, args->size,
+ addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED, 0);
- up_write(&current->mm->mmap_sem);
drm_gem_object_unreference_unlocked(obj);
@@ -712,7 +702,6 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct drm_device *dev = obj->dev;
unsigned long f_vaddr;
pgoff_t page_offset;
@@ -724,21 +713,10 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- /*
- * allocate all pages as desired size if user wants to allocate
- * physically non-continuous memory.
- */
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- ret = exynos_drm_gem_get_pages(obj);
- if (ret < 0)
- goto err;
- }
-
ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
if (ret < 0)
DRM_ERROR("failed to map pages.\n");
-err:
mutex_unlock(&dev->struct_mutex);
return convert_to_vm_err_msg(ret);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 2c8a60c..f920fb5 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -129,6 +129,7 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
if (buf_priv->currently_mapped == I810_BUF_MAPPED)
return -EINVAL;
+ /* This is all entirely broken */
down_write(&current->mm->mmap_sem);
old_fops = file_priv->filp->f_op;
file_priv->filp->f_op = &i810_buffer_fops;
@@ -157,11 +158,8 @@ static int i810_unmap_buffer(struct drm_buf *buf)
if (buf_priv->currently_mapped != I810_BUF_MAPPED)
return -EINVAL;
- down_write(&current->mm->mmap_sem);
- retcode = do_munmap(current->mm,
- (unsigned long)buf_priv->virtual,
+ retcode = vm_munmap((unsigned long)buf_priv->virtual,
(size_t) buf->total);
- up_write(&current->mm->mmap_sem);
buf_priv->currently_mapped = I810_BUF_UNMAPPED;
buf_priv->virtual = NULL;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b505b70..e6162a1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1224,6 +1224,9 @@ static int i915_emon_status(struct seq_file *m, void *unused)
unsigned long temp, chipset, gfx;
int ret;
+ if (!IS_GEN5(dev))
+ return -ENODEV;
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 785f67f..ba60f3c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1701,6 +1701,9 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
unsigned long diffms;
u32 count;
+ if (dev_priv->info->gen != 5)
+ return;
+
getrawmonotonic(&now);
diff1 = timespec_sub(now, dev_priv->last_time2);
@@ -2121,12 +2124,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev);
- spin_lock(&mchdev_lock);
- i915_mch_dev = dev_priv;
- dev_priv->mchdev_lock = &mchdev_lock;
- spin_unlock(&mchdev_lock);
+ if (IS_GEN5(dev)) {
+ spin_lock(&mchdev_lock);
+ i915_mch_dev = dev_priv;
+ dev_priv->mchdev_lock = &mchdev_lock;
+ spin_unlock(&mchdev_lock);
- ips_ping_for_i915_load();
+ ips_ping_for_i915_load();
+ }
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e3c6ac..0d1e4b7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1087,11 +1087,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
- down_write(&current->mm->mmap_sem);
- addr = do_mmap(obj->filp, 0, args->size,
+ addr = vm_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
- up_write(&current->mm->mmap_sem);
drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f51a696..de43194 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1133,6 +1133,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+ DRM_DEBUG("execbuf with %u cliprects\n",
+ args->num_cliprects);
+ return -EINVAL;
+ }
cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
GFP_KERNEL);
if (cliprects == NULL) {
@@ -1404,7 +1409,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
- if (args->buffer_count < 1) {
+ if (args->buffer_count < 1 ||
+ args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
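
The two added checks above bound the user-supplied counts so that the later count * sizeof(element) multiplication cannot wrap before kmalloc(). The same idiom in a stand-alone sketch (SIZE_MAX is used here instead of UINT_MAX purely for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cliprect { uint32_t x1, y1, x2, y2; };

/* Refuse counts whose byte size would wrap around before calling malloc(). */
static void *alloc_array(size_t count)
{
	if (count > SIZE_MAX / sizeof(struct cliprect))
		return NULL;		/* count * size would overflow */
	return malloc(count * sizeof(struct cliprect));
}

int main(void)
{
	void *p = alloc_array(4);

	printf("alloc(4)    -> %s\n", p ? "ok" : "rejected");
	free(p);
	printf("alloc(huge) -> %s\n",
	       alloc_array(SIZE_MAX / 2) ? "ok" : "rejected");
	return 0;
}
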
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b4bb1ef..9d24d65 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -568,6 +568,7 @@
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
+#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
#define CM0_DEPTH_EVICT_DISABLE (1<<4)
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4d3d736..90b9793 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -430,8 +430,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
- struct drm_crtc *crtc;
enum drm_connector_status status;
+ struct intel_load_detect_pipe tmp;
if (I915_HAS_HOTPLUG(dev)) {
if (intel_crt_detect_hotplug(connector)) {
@@ -450,23 +450,16 @@ intel_crt_detect(struct drm_connector *connector, bool force)
return connector->status;
/* for pre-945g platforms use load detect */
- crtc = crt->base.base.crtc;
- if (crtc && crtc->enabled) {
- status = intel_crt_load_detect(crt);
- } else {
- struct intel_load_detect_pipe tmp;
-
- if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
- &tmp)) {
- if (intel_crt_detect_ddc(connector))
- status = connector_status_connected;
- else
- status = intel_crt_load_detect(crt);
- intel_release_load_detect_pipe(&crt->base, connector,
- &tmp);
- } else
- status = connector_status_unknown;
- }
+ if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
+ &tmp)) {
+ if (intel_crt_detect_ddc(connector))
+ status = connector_status_connected;
+ else
+ status = intel_crt_load_detect(crt);
+ intel_release_load_detect_pipe(&crt->base, connector,
+ &tmp);
+ } else
+ status = connector_status_unknown;
return status;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5908cd5..1b1cf3b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7072,9 +7072,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- int dpll_reg = DPLL(pipe);
- int dpll = I915_READ(dpll_reg);
if (HAS_PCH_SPLIT(dev))
return;
@@ -7087,10 +7084,15 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
* the manual case.
*/
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+ int pipe = intel_crtc->pipe;
+ int dpll_reg = DPLL(pipe);
+ u32 dpll;
+
DRM_DEBUG_DRIVER("downclocking LVDS\n");
assert_panel_unlocked(dev_priv, pipe);
+ dpll = I915_READ(dpll_reg);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
intel_wait_for_vblank(dev, pipe);
@@ -7098,7 +7100,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
}
-
}
/**
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index cae3e5f..2d7f47b 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
val &= ~VIDEO_DIP_SELECT_MASK;
- I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
+ I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 30e2c82..9c71183 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -750,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.ident = "Hewlett-Packard t5745",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_BOARD_NAME, "hp t5745"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
},
},
{
@@ -758,7 +758,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.ident = "Hewlett-Packard st5747",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_BOARD_NAME, "hp st5747"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
},
},
{
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f75806e..62892a8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -398,6 +398,17 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
+
+ if (IS_GEN6(dev)) {
+ /* From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ I915_WRITE(CACHE_MODE_0,
+ CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
+ }
+
if (INTEL_INFO(dev)->gen >= 6) {
I915_WRITE(INSTPM,
INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e36b171..ae5e748 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -731,6 +731,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
uint16_t width, height;
uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
uint16_t h_sync_offset, v_sync_offset;
+ int mode_clock;
width = mode->crtc_hdisplay;
height = mode->crtc_vdisplay;
@@ -745,7 +746,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
- dtd->part1.clock = mode->clock / 10;
+ mode_clock = mode->clock;
+ mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
+ mode_clock /= 10;
+ dtd->part1.clock = mode_clock;
+
dtd->part1.h_active = width & 0xff;
dtd->part1.h_blank = h_blank_len & 0xff;
dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
@@ -996,7 +1001,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
- struct intel_sdvo_dtd input_dtd;
+ struct intel_sdvo_dtd input_dtd, output_dtd;
int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
int rate;
@@ -1021,20 +1026,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo->attached_output))
return;
- /* We have tried to get input timing in mode_fixup, and filled into
- * adjusted_mode.
- */
- if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
- input_dtd = intel_sdvo->input_dtd;
- } else {
- /* Set the output timing to the screen */
- if (!intel_sdvo_set_target_output(intel_sdvo,
- intel_sdvo->attached_output))
- return;
-
- intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
- (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
- }
+ /* lvds has a special fixed output timing. */
+ if (intel_sdvo->is_lvds)
+ intel_sdvo_get_dtd_from_mode(&output_dtd,
+ intel_sdvo->sdvo_lvds_fixed_mode);
+ else
+ intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd);
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
@@ -1052,6 +1050,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
!intel_sdvo_set_tv_format(intel_sdvo))
return;
+ /* We have tried to get input timing in mode_fixup, and filled into
+ * adjusted_mode.
+ */
+ intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
switch (pixel_multiplier) {
@@ -1218,8 +1220,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
{
+ struct drm_device *dev = intel_sdvo->base.base.dev;
u8 response[2];
+ /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
+ * on the line. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ return false;
+
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&response, 2) && response[0];
}
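
The new clock computation above relies on the GCC/Clang "x ?: y" extension, which evaluates x once and falls back to y only when x is zero, so an unknown pixel multiplier degrades to a divide by one. A quick stand-alone sketch of that step with invented values (requires GCC or Clang):

#include <stdio.h>

/* Stand-in for intel_mode_get_pixel_multiplier(); may return 0 when the
 * multiplier is unknown. */
static int get_pixel_multiplier(int mode)
{
	return mode == 0 ? 0 : 2;
}

int main(void)
{
	for (int mode = 0; mode < 2; mode++) {
		int clock = 225000;	/* kHz, invented */

		/* GCC extension: "a ?: b" is "a ? a : b" with a evaluated once. */
		clock /= get_pixel_multiplier(mode) ?: 1;
		clock /= 10;		/* the SDVO DTD stores clock / 10 */
		printf("mode %d -> dtd clock %d\n", mode, clock);
	}
	return 0;
}
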
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7814a76..284bd25 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -270,7 +270,7 @@ static bool nouveau_dsm_detect(void)
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL;
int has_dsm = 0;
- int has_optimus;
+ int has_optimus = 0;
int vga_count = 0;
bool guid_valid;
int retval;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 80963d0..0be4a81 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6156,10 +6156,14 @@ dcb_fake_connectors(struct nvbios *bios)
/* heuristic: if we ever get a non-zero connector field, assume
 * that all the indices are valid and we don't need to fake them.
+ *
+ * and, as usual, a blacklist of boards with bad bios data..
*/
- for (i = 0; i < dcbt->entries; i++) {
- if (dcbt->entry[i].connector)
- return;
+ if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
+ for (i = 0; i < dcbt->entries; i++) {
+ if (dcbt->entry[i].connector)
+ return;
+ }
}
/* no useful connector info available, we need to make it up
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
index 59ea1c1..c3de363 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -32,7 +32,9 @@ static bool
hdmi_sor(struct drm_encoder *encoder)
{
struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
- if (dev_priv->chipset < 0xa3)
+ if (dev_priv->chipset < 0xa3 ||
+ dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac)
return false;
return true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index e2be95a..77e5646 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -29,10 +29,6 @@
#include "nouveau_i2c.h"
#include "nouveau_hw.h"
-#define T_TIMEOUT 2200000
-#define T_RISEFALL 1000
-#define T_HOLD 5000
-
static void
i2c_drive_scl(void *data, int state)
{
@@ -113,175 +109,6 @@ i2c_sense_sda(void *data)
return 0;
}
-static void
-i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
-{
- udelay((nsec + 500) / 1000);
-}
-
-static bool
-i2c_raise_scl(struct nouveau_i2c_chan *port)
-{
- u32 timeout = T_TIMEOUT / T_RISEFALL;
-
- i2c_drive_scl(port, 1);
- do {
- i2c_delay(port, T_RISEFALL);
- } while (!i2c_sense_scl(port) && --timeout);
-
- return timeout != 0;
-}
-
-static int
-i2c_start(struct nouveau_i2c_chan *port)
-{
- int ret = 0;
-
- port->state = i2c_sense_scl(port);
- port->state |= i2c_sense_sda(port) << 1;
- if (port->state != 3) {
- i2c_drive_scl(port, 0);
- i2c_drive_sda(port, 1);
- if (!i2c_raise_scl(port))
- ret = -EBUSY;
- }
-
- i2c_drive_sda(port, 0);
- i2c_delay(port, T_HOLD);
- i2c_drive_scl(port, 0);
- i2c_delay(port, T_HOLD);
- return ret;
-}
-
-static void
-i2c_stop(struct nouveau_i2c_chan *port)
-{
- i2c_drive_scl(port, 0);
- i2c_drive_sda(port, 0);
- i2c_delay(port, T_RISEFALL);
-
- i2c_drive_scl(port, 1);
- i2c_delay(port, T_HOLD);
- i2c_drive_sda(port, 1);
- i2c_delay(port, T_HOLD);
-}
-
-static int
-i2c_bitw(struct nouveau_i2c_chan *port, int sda)
-{
- i2c_drive_sda(port, sda);
- i2c_delay(port, T_RISEFALL);
-
- if (!i2c_raise_scl(port))
- return -ETIMEDOUT;
- i2c_delay(port, T_HOLD);
-
- i2c_drive_scl(port, 0);
- i2c_delay(port, T_HOLD);
- return 0;
-}
-
-static int
-i2c_bitr(struct nouveau_i2c_chan *port)
-{
- int sda;
-
- i2c_drive_sda(port, 1);
- i2c_delay(port, T_RISEFALL);
-
- if (!i2c_raise_scl(port))
- return -ETIMEDOUT;
- i2c_delay(port, T_HOLD);
-
- sda = i2c_sense_sda(port);
-
- i2c_drive_scl(port, 0);
- i2c_delay(port, T_HOLD);
- return sda;
-}
-
-static int
-i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
-{
- int i, bit;
-
- *byte = 0;
- for (i = 7; i >= 0; i--) {
- bit = i2c_bitr(port);
- if (bit < 0)
- return bit;
- *byte |= bit << i;
- }
-
- return i2c_bitw(port, last ? 1 : 0);
-}
-
-static int
-i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
-{
- int i, ret;
- for (i = 7; i >= 0; i--) {
- ret = i2c_bitw(port, !!(byte & (1 << i)));
- if (ret < 0)
- return ret;
- }
-
- ret = i2c_bitr(port);
- if (ret == 1) /* nack */
- ret = -EIO;
- return ret;
-}
-
-static int
-i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
-{
- u32 addr = msg->addr << 1;
- if (msg->flags & I2C_M_RD)
- addr |= 1;
- return i2c_put_byte(port, addr);
-}
-
-static int
-i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
-{
- struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
- struct i2c_msg *msg = msgs;
- int ret = 0, mcnt = num;
-
- while (!ret && mcnt--) {
- u8 remaining = msg->len;
- u8 *ptr = msg->buf;
-
- ret = i2c_start(port);
- if (ret == 0)
- ret = i2c_addr(port, msg);
-
- if (msg->flags & I2C_M_RD) {
- while (!ret && remaining--)
- ret = i2c_get_byte(port, ptr++, !remaining);
- } else {
- while (!ret && remaining--)
- ret = i2c_put_byte(port, *ptr++);
- }
-
- msg++;
- }
-
- i2c_stop(port);
- return (ret < 0) ? ret : num;
-}
-
-static u32
-i2c_bit_func(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-const struct i2c_algorithm nouveau_i2c_bit_algo = {
- .master_xfer = i2c_bit_xfer,
- .functionality = i2c_bit_func
-};
-
static const uint32_t nv50_i2c_port[] = {
0x00e138, 0x00e150, 0x00e168, 0x00e180,
0x00e254, 0x00e274, 0x00e764, 0x00e780,
@@ -384,12 +211,10 @@ nouveau_i2c_init(struct drm_device *dev)
case 0: /* NV04:NV50 */
port->drive = entry[0];
port->sense = entry[1];
- port->adapter.algo = &nouveau_i2c_bit_algo;
break;
case 4: /* NV4E */
port->drive = 0x600800 + entry[1];
port->sense = port->drive;
- port->adapter.algo = &nouveau_i2c_bit_algo;
break;
case 5: /* NV50- */
port->drive = entry[0] & 0x0f;
@@ -402,7 +227,6 @@ nouveau_i2c_init(struct drm_device *dev)
port->drive = 0x00d014 + (port->drive * 0x20);
port->sense = port->drive;
}
- port->adapter.algo = &nouveau_i2c_bit_algo;
break;
case 6: /* NV50- DP AUX */
port->drive = entry[0];
@@ -413,7 +237,7 @@ nouveau_i2c_init(struct drm_device *dev)
break;
}
- if (!port->adapter.algo) {
+ if (!port->adapter.algo && !port->drive) {
NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
i, port->type, port->drive, port->sense);
kfree(port);
@@ -429,7 +253,26 @@ nouveau_i2c_init(struct drm_device *dev)
port->dcb = ROM32(entry[0]);
i2c_set_adapdata(&port->adapter, i2c);
- ret = i2c_add_adapter(&port->adapter);
+ if (port->adapter.algo != &nouveau_dp_i2c_algo) {
+ port->adapter.algo_data = &port->bit;
+ port->bit.udelay = 10;
+ port->bit.timeout = usecs_to_jiffies(2200);
+ port->bit.data = port;
+ port->bit.setsda = i2c_drive_sda;
+ port->bit.setscl = i2c_drive_scl;
+ port->bit.getsda = i2c_sense_sda;
+ port->bit.getscl = i2c_sense_scl;
+
+ i2c_drive_scl(port, 0);
+ i2c_drive_sda(port, 1);
+ i2c_drive_scl(port, 1);
+
+ ret = i2c_bit_add_bus(&port->adapter);
+ } else {
+ port->adapter.algo = &nouveau_dp_i2c_algo;
+ ret = i2c_add_adapter(&port->adapter);
+ }
+
if (ret) {
NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
kfree(port);
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index 4d2e4e9..1d08389 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -34,6 +34,7 @@
struct nouveau_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
struct list_head head;
u8 index;
u8 type;
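
The nouveau change above drops the driver's private bit-banging engine and instead hands setscl/setsda/getscl/getsda callbacks to the generic i2c-algo-bit layer through struct i2c_algo_bit_data. The removed helpers implemented the usual MSB-first transfer; a compact userspace rendering of that read loop, with a stubbed bus and invented names:

#include <stdint.h>
#include <stdio.h>

/* Fake open-drain bus: SDA reads as the AND of what the "master" releases
 * and what a stub "slave" (answering 0xA5) drives. */
static int sda_driven = 1, slave_bit = 1;

static void set_scl(int v) { (void)v; }
static void set_sda(int v) { sda_driven = v; }
static int  get_sda(void)  { return sda_driven & slave_bit; }

/* One clocked bit read, MSB first, as the removed i2c_bitr() did. */
static int read_bit(int bit_index)
{
	static const uint8_t slave_byte = 0xA5;

	slave_bit = (slave_byte >> bit_index) & 1;	/* slave drives SDA */
	set_sda(1);					/* master releases the line */
	set_scl(1);
	int v = get_sda();
	set_scl(0);
	return v;
}

int main(void)
{
	uint8_t byte = 0;

	for (int i = 7; i >= 0; i--)
		byte |= (uint8_t)(read_bit(i) << i);
	set_sda(0);		/* master pulls SDA low: ACK the byte */
	set_scl(1);
	set_scl(0);
	printf("read 0x%02X\n", (unsigned int)byte);
	return 0;
}
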
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 550ad3f..9d79180 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -65,7 +65,7 @@ nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
if (line < 10) {
line = (line - 2) * 4;
reg = NV_PCRTC_GPIO_EXT;
- mask = 0x00000003 << ((line - 2) * 4);
+ mask = 0x00000003;
data = (dir << 1) | out;
} else
if (line < 14) {
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index 5bf5503..f704e94 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fb.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -54,6 +54,11 @@ nvc0_mfb_isr(struct drm_device *dev)
nvc0_mfb_subp_isr(dev, unit, subp);
units &= ~(1 << unit);
}
+
+ /* we do something horribly wrong and upset PMFB a lot, so mask off
+ * interrupts from it after the first one until it's fixed
+ */
+ nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
}
static void
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b5ff1f7..af1054f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -575,6 +575,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (rdev->family < CHIP_RV770)
pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
+ /* use frac fb div on APUs */
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
pll->flags |= RADEON_PLL_LEGACY;
@@ -955,8 +958,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
break;
}
- if (radeon_encoder->active_device &
- (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
+ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
radeon_get_connector_for_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ea7df16e2..5992502 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -241,8 +241,8 @@ int radeon_wb_init(struct radeon_device *rdev)
rdev->wb.use_event = true;
}
}
- /* always use writeback/events on NI */
- if (ASIC_IS_DCE5(rdev)) {
+ /* always use writeback/events on NI, APUs */
+ if (rdev->family >= CHIP_PALM) {
rdev->wb.enabled = true;
rdev->wb.use_event = true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8086c96..0a1d4bd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -533,7 +533,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_legacy_init_crtc(dev, radeon_crtc);
}
-static const char *encoder_names[36] = {
+static const char *encoder_names[37] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
@@ -570,6 +570,7 @@ static const char *encoder_names[36] = {
"INTERNAL_UNIPHY2",
"NUTMEG",
"TRAVIS",
+ "INTERNAL_VCE"
};
static const char *connector_names[15] = {
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index a3d0332..ffddcba 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -34,7 +34,7 @@ config HID
config HID_BATTERY_STRENGTH
bool
depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
- default y
+ default n
config HIDRAW
bool "/dev/hidraw raw HID device support"
diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c
index de47039..9f85f82 100644
--- a/drivers/hid/hid-tivo.c
+++ b/drivers/hid/hid-tivo.c
@@ -62,7 +62,7 @@ static int tivo_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id tivo_devices[] = {
/* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */
- { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
{ }
};
diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c
index 88a050d..3ad91f6 100644
--- a/drivers/hsi/clients/hsi_char.c
+++ b/drivers/hsi/clients/hsi_char.c
@@ -123,7 +123,7 @@ struct hsc_client_data {
static unsigned int hsc_major;
/* Maximum buffer size that hsi_char will accept from userspace */
static unsigned int max_data_size = 0x1000;
-module_param(max_data_size, uint, S_IRUSR | S_IWUSR);
+module_param(max_data_size, uint, 0);
MODULE_PARM_DESC(max_data_size, "max read/write data size [4,8..65536] (^2)");
static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg,
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index 4e2d79b..2d58f93 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -21,26 +21,13 @@
*/
#include <linux/hsi/hsi.h>
#include <linux/compiler.h>
-#include <linux/rwsem.h>
#include <linux/list.h>
-#include <linux/spinlock.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/notifier.h>
#include "hsi_core.h"
-static struct device_type hsi_ctrl = {
- .name = "hsi_controller",
-};
-
-static struct device_type hsi_cl = {
- .name = "hsi_client",
-};
-
-static struct device_type hsi_port = {
- .name = "hsi_port",
-};
-
static ssize_t modalias_show(struct device *dev,
struct device_attribute *a __maybe_unused, char *buf)
{
@@ -54,8 +41,7 @@ static struct device_attribute hsi_bus_dev_attrs[] = {
static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- if (dev->type == &hsi_cl)
- add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
+ add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
return 0;
}
@@ -80,12 +66,10 @@ static void hsi_client_release(struct device *dev)
static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
{
struct hsi_client *cl;
- unsigned long flags;
cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (!cl)
return;
- cl->device.type = &hsi_cl;
cl->tx_cfg = info->tx_cfg;
cl->rx_cfg = info->rx_cfg;
cl->device.bus = &hsi_bus_type;
@@ -93,14 +77,11 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info)
cl->device.release = hsi_client_release;
dev_set_name(&cl->device, info->name);
cl->device.platform_data = info->platform_data;
- spin_lock_irqsave(&port->clock, flags);
- list_add_tail(&cl->link, &port->clients);
- spin_unlock_irqrestore(&port->clock, flags);
if (info->archdata)
cl->device.archdata = *info->archdata;
if (device_register(&cl->device) < 0) {
pr_err("hsi: failed to register client: %s\n", info->name);
- kfree(cl);
+ put_device(&cl->device);
}
}
@@ -120,13 +101,6 @@ static void hsi_scan_board_info(struct hsi_controller *hsi)
static int hsi_remove_client(struct device *dev, void *data __maybe_unused)
{
- struct hsi_client *cl = to_hsi_client(dev);
- struct hsi_port *port = to_hsi_port(dev->parent);
- unsigned long flags;
-
- spin_lock_irqsave(&port->clock, flags);
- list_del(&cl->link);
- spin_unlock_irqrestore(&port->clock, flags);
device_unregister(dev);
return 0;
@@ -140,12 +114,17 @@ static int hsi_remove_port(struct device *dev, void *data __maybe_unused)
return 0;
}
-static void hsi_controller_release(struct device *dev __maybe_unused)
+static void hsi_controller_release(struct device *dev)
{
+ struct hsi_controller *hsi = to_hsi_controller(dev);
+
+ kfree(hsi->port);
+ kfree(hsi);
}
-static void hsi_port_release(struct device *dev __maybe_unused)
+static void hsi_port_release(struct device *dev)
{
+ kfree(to_hsi_port(dev));
}
/**
@@ -170,20 +149,12 @@ int hsi_register_controller(struct hsi_controller *hsi)
unsigned int i;
int err;
- hsi->device.type = &hsi_ctrl;
- hsi->device.bus = &hsi_bus_type;
- hsi->device.release = hsi_controller_release;
- err = device_register(&hsi->device);
+ err = device_add(&hsi->device);
if (err < 0)
return err;
for (i = 0; i < hsi->num_ports; i++) {
- hsi->port[i].device.parent = &hsi->device;
- hsi->port[i].device.bus = &hsi_bus_type;
- hsi->port[i].device.release = hsi_port_release;
- hsi->port[i].device.type = &hsi_port;
- INIT_LIST_HEAD(&hsi->port[i].clients);
- spin_lock_init(&hsi->port[i].clock);
- err = device_register(&hsi->port[i].device);
+ hsi->port[i]->device.parent = &hsi->device;
+ err = device_add(&hsi->port[i]->device);
if (err < 0)
goto out;
}
@@ -192,7 +163,9 @@ int hsi_register_controller(struct hsi_controller *hsi)
return 0;
out:
- hsi_unregister_controller(hsi);
+ while (i-- > 0)
+ device_del(&hsi->port[i]->device);
+ device_del(&hsi->device);
return err;
}
@@ -223,6 +196,29 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
}
/**
+ * hsi_put_controller - Free an HSI controller
+ *
+ * @hsi: Pointer to the HSI controller to be freed

+ *
+ * HSI controller drivers should only use this function if they need
+ * to free their allocated hsi_controller structures before a successful
+ * call to hsi_register_controller. Other use is not allowed.
+ */
+void hsi_put_controller(struct hsi_controller *hsi)
+{
+ unsigned int i;
+
+ if (!hsi)
+ return;
+
+ for (i = 0; i < hsi->num_ports; i++)
+ if (hsi->port && hsi->port[i])
+ put_device(&hsi->port[i]->device);
+ put_device(&hsi->device);
+}
+EXPORT_SYMBOL_GPL(hsi_put_controller);
+
+/**
* hsi_alloc_controller - Allocate an HSI controller and its ports
* @n_ports: Number of ports on the HSI controller
* @flags: Kernel allocation flags
@@ -232,55 +228,52 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags)
{
struct hsi_controller *hsi;
- struct hsi_port *port;
+ struct hsi_port **port;
unsigned int i;
if (!n_ports)
return NULL;
- port = kzalloc(sizeof(*port)*n_ports, flags);
- if (!port)
- return NULL;
hsi = kzalloc(sizeof(*hsi), flags);
if (!hsi)
- goto out;
- for (i = 0; i < n_ports; i++) {
- dev_set_name(&port[i].device, "port%d", i);
- port[i].num = i;
- port[i].async = hsi_dummy_msg;
- port[i].setup = hsi_dummy_cl;
- port[i].flush = hsi_dummy_cl;
- port[i].start_tx = hsi_dummy_cl;
- port[i].stop_tx = hsi_dummy_cl;
- port[i].release = hsi_dummy_cl;
- mutex_init(&port[i].lock);
+ return NULL;
+ port = kzalloc(sizeof(*port)*n_ports, flags);
+ if (!port) {
+ kfree(hsi);
+ return NULL;
}
hsi->num_ports = n_ports;
hsi->port = port;
+ hsi->device.release = hsi_controller_release;
+ device_initialize(&hsi->device);
+
+ for (i = 0; i < n_ports; i++) {
+ port[i] = kzalloc(sizeof(**port), flags);
+ if (port[i] == NULL)
+ goto out;
+ port[i]->num = i;
+ port[i]->async = hsi_dummy_msg;
+ port[i]->setup = hsi_dummy_cl;
+ port[i]->flush = hsi_dummy_cl;
+ port[i]->start_tx = hsi_dummy_cl;
+ port[i]->stop_tx = hsi_dummy_cl;
+ port[i]->release = hsi_dummy_cl;
+ mutex_init(&port[i]->lock);
+ ATOMIC_INIT_NOTIFIER_HEAD(&port[i]->n_head);
+ dev_set_name(&port[i]->device, "port%d", i);
+ hsi->port[i]->device.release = hsi_port_release;
+ device_initialize(&hsi->port[i]->device);
+ }
return hsi;
out:
- kfree(port);
+ hsi_put_controller(hsi);
return NULL;
}
EXPORT_SYMBOL_GPL(hsi_alloc_controller);
/**
- * hsi_free_controller - Free an HSI controller
- * @hsi: Pointer to HSI controller
- */
-void hsi_free_controller(struct hsi_controller *hsi)
-{
- if (!hsi)
- return;
-
- kfree(hsi->port);
- kfree(hsi);
-}
-EXPORT_SYMBOL_GPL(hsi_free_controller);
-
-/**
* hsi_free_msg - Free an HSI message
* @msg: Pointer to the HSI message
*
@@ -414,37 +407,67 @@ void hsi_release_port(struct hsi_client *cl)
}
EXPORT_SYMBOL_GPL(hsi_release_port);
-static int hsi_start_rx(struct hsi_client *cl, void *data __maybe_unused)
+static int hsi_event_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *data __maybe_unused)
{
- if (cl->hsi_start_rx)
- (*cl->hsi_start_rx)(cl);
+ struct hsi_client *cl = container_of(nb, struct hsi_client, nb);
+
+ (*cl->ehandler)(cl, event);
return 0;
}
-static int hsi_stop_rx(struct hsi_client *cl, void *data __maybe_unused)
+/**
+ * hsi_register_port_event - Register a client to receive port events
+ * @cl: HSI client that wants to receive port events
+ * @cb: Event handler callback
+ *
+ * Clients should register a callback to be able to receive
+ * events from the ports. Registration should happen after
+ * claiming the port.
+ * The handler can be called in interrupt context.
+ *
+ * Returns -errno on error, or 0 on success.
+ */
+int hsi_register_port_event(struct hsi_client *cl,
+ void (*handler)(struct hsi_client *, unsigned long))
{
- if (cl->hsi_stop_rx)
- (*cl->hsi_stop_rx)(cl);
+ struct hsi_port *port = hsi_get_port(cl);
- return 0;
+ if (!handler || cl->ehandler)
+ return -EINVAL;
+ if (!hsi_port_claimed(cl))
+ return -EACCES;
+ cl->ehandler = handler;
+ cl->nb.notifier_call = hsi_event_notifier_call;
+
+ return atomic_notifier_chain_register(&port->n_head, &cl->nb);
}
+EXPORT_SYMBOL_GPL(hsi_register_port_event);
-static int hsi_port_for_each_client(struct hsi_port *port, void *data,
- int (*fn)(struct hsi_client *cl, void *data))
+/**
+ * hsi_unregister_port_event - Stop receiving port events for a client
+ * @cl: HSI client that wants to stop receiving port events
+ *
+ * Clients should call this function before releasing their associated
+ * port.
+ *
+ * Returns -errno on error, or 0 on success.
+ */
+int hsi_unregister_port_event(struct hsi_client *cl)
{
- struct hsi_client *cl;
+ struct hsi_port *port = hsi_get_port(cl);
+ int err;
- spin_lock(&port->clock);
- list_for_each_entry(cl, &port->clients, link) {
- spin_unlock(&port->clock);
- (*fn)(cl, data);
- spin_lock(&port->clock);
- }
- spin_unlock(&port->clock);
+ WARN_ON(!hsi_port_claimed(cl));
- return 0;
+ err = atomic_notifier_chain_unregister(&port->n_head, &cl->nb);
+ if (!err)
+ cl->ehandler = NULL;
+
+ return err;
}
+EXPORT_SYMBOL_GPL(hsi_unregister_port_event);
/**
* hsi_event -Notifies clients about port events
@@ -458,22 +481,12 @@ static int hsi_port_for_each_client(struct hsi_port *port, void *data,
* Events:
* HSI_EVENT_START_RX - Incoming wake line high
* HSI_EVENT_STOP_RX - Incoming wake line down
+ *
+ * Returns -errno on error, or 0 on success.
*/
-void hsi_event(struct hsi_port *port, unsigned int event)
+int hsi_event(struct hsi_port *port, unsigned long event)
{
- int (*fn)(struct hsi_client *cl, void *data);
-
- switch (event) {
- case HSI_EVENT_START_RX:
- fn = hsi_start_rx;
- break;
- case HSI_EVENT_STOP_RX:
- fn = hsi_stop_rx;
- break;
- default:
- return;
- }
- hsi_port_for_each_client(port, NULL, fn);
+ return atomic_notifier_call_chain(&port->n_head, event, NULL);
}
EXPORT_SYMBOL_GPL(hsi_event);
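
The HSI core above now forwards port events through an atomic notifier chain instead of iterating a private client list under a spinlock. Outside the kernel the same publish/subscribe shape can be sketched with a plain linked list of callbacks (all names invented):

#include <stdio.h>

struct listener {
	struct listener *next;
	void (*handler)(unsigned long event, void *ctx);
	void *ctx;
};

static struct listener *chain;

static void chain_register(struct listener *l)
{
	l->next = chain;
	chain = l;
}

static void chain_call(unsigned long event)
{
	/* Every registered listener sees the event, in registration order
	 * reversed, much like notifier_call_chain() walks its list. */
	for (struct listener *l = chain; l; l = l->next)
		l->handler(event, l->ctx);
}

static void client_handler(unsigned long event, void *ctx)
{
	printf("%s saw event %lu\n", (const char *)ctx, event);
}

int main(void)
{
	struct listener a = { .handler = client_handler, .ctx = "client A" };
	struct listener b = { .handler = client_handler, .ctx = "client B" };

	chain_register(&a);
	chain_register(&b);
	chain_call(1);		/* e.g. "incoming wake line high" */
	return 0;
}
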
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index ce43642..f85ce70 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -47,7 +47,7 @@ struct ad7314_data {
u16 rx ____cacheline_aligned;
};
-static int ad7314_spi_read(struct ad7314_data *chip, s16 *data)
+static int ad7314_spi_read(struct ad7314_data *chip)
{
int ret;
@@ -57,9 +57,7 @@ static int ad7314_spi_read(struct ad7314_data *chip, s16 *data)
return ret;
}
- *data = be16_to_cpu(chip->rx);
-
- return ret;
+ return be16_to_cpu(chip->rx);
}
static ssize_t ad7314_show_temperature(struct device *dev,
@@ -70,12 +68,12 @@ static ssize_t ad7314_show_temperature(struct device *dev,
s16 data;
int ret;
- ret = ad7314_spi_read(chip, &data);
+ ret = ad7314_spi_read(chip);
if (ret < 0)
return ret;
switch (spi_get_device_id(chip->spi_dev)->driver_data) {
case ad7314:
- data = (data & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET;
+ data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET;
data = (data << 6) >> 6;
return sprintf(buf, "%d\n", 250 * data);
@@ -86,7 +84,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
* with a sign bit - which is a 14 bit 2's complement
* register. 1lsb - 31.25 milli degrees centigrade
*/
- data &= ADT7301_TEMP_MASK;
+ data = ret & ADT7301_TEMP_MASK;
data = (data << 2) >> 2;
return sprintf(buf, "%d\n",
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index 7765e4f..1958f03 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -59,14 +59,11 @@ struct ads1015_data {
struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
};
-static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
- int *value)
+static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
{
u16 config;
- s16 conversion;
struct ads1015_data *data = i2c_get_clientdata(client);
unsigned int pga = data->channel_data[channel].pga;
- int fullscale;
unsigned int data_rate = data->channel_data[channel].data_rate;
unsigned int conversion_time_ms;
int res;
@@ -78,7 +75,6 @@ static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
if (res < 0)
goto err_unlock;
config = res;
- fullscale = fullscale_table[pga];
conversion_time_ms = DIV_ROUND_UP(1000, data_rate_table[data_rate]);
/* setup and start single conversion */
@@ -105,33 +101,36 @@ static int ads1015_read_value(struct i2c_client *client, unsigned int channel,
}
res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION);
- if (res < 0)
- goto err_unlock;
- conversion = res;
-
- mutex_unlock(&data->update_lock);
-
- *value = DIV_ROUND_CLOSEST(conversion * fullscale, 0x7ff0);
-
- return 0;
err_unlock:
mutex_unlock(&data->update_lock);
return res;
}
+static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
+ s16 reg)
+{
+ struct ads1015_data *data = i2c_get_clientdata(client);
+ unsigned int pga = data->channel_data[channel].pga;
+ int fullscale = fullscale_table[pga];
+
+ return DIV_ROUND_CLOSEST(reg * fullscale, 0x7ff0);
+}
+
/* sysfs callback function */
static ssize_t show_in(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct i2c_client *client = to_i2c_client(dev);
- int in;
int res;
+ int index = attr->index;
- res = ads1015_read_value(client, attr->index, &in);
+ res = ads1015_read_adc(client, index);
+ if (res < 0)
+ return res;
- return (res < 0) ? res : sprintf(buf, "%d\n", in);
+ return sprintf(buf, "%d\n", ads1015_reg_to_mv(client, index, res));
}
static const struct sensor_device_attribute ads1015_in[] = {
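
The ads1015 split above turns the register access into a value-or-negative-errno helper and moves the millivolt scaling into its own function built on DIV_ROUND_CLOSEST. A stand-alone sketch of that pairing, with a stubbed read and an invented full-scale value:

#include <errno.h>
#include <stdio.h>

/* Rounds to nearest for the positive values used here. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

/* Raw conversion-register read: negative errno on failure, otherwise the
 * raw register value (stubbed out for illustration). */
static int read_adc_raw(int fail)
{
	return fail ? -EIO : 0x3FF0;
}

static int reg_to_mv(int reg, int fullscale_mv)
{
	/* Same scaling as the driver: full scale corresponds to 0x7ff0. */
	return DIV_ROUND_CLOSEST(reg * fullscale_mv, 0x7ff0);
}

int main(void)
{
	int res = read_adc_raw(0);

	if (res < 0) {
		fprintf(stderr, "read failed: %d\n", res);
		return 1;
	}
	printf("raw 0x%04X -> %d mV (2048 mV full scale)\n",
	       res, reg_to_mv(res, 2048));
	return 0;
}
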
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 0d3141f..b9d5123 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -52,7 +52,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
-#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */
+#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
@@ -709,6 +709,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
indx = TO_ATTR_NO(cpu);
+ /* The core id is too big, just return */
+ if (indx > MAX_CORE_DATA - 1)
+ return;
+
if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
coretemp_remove_core(pdata, &pdev->dev, indx);
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index b7494af..e8e18ca 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -122,6 +122,41 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4)
return true;
}
+/*
+ * Newer BKDG versions have an updated recommendation on how to properly
+ * initialize the running average range (was: 0xE, now: 0x9). This avoids
+ * counter saturations resulting in bogus power readings.
+ * We correct this value ourselves to cope with older BIOSes.
+ */
+static DEFINE_PCI_DEVICE_TABLE(affected_device) = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+ { 0 }
+};
+
+static void __devinit tweak_runavg_range(struct pci_dev *pdev)
+{
+ u32 val;
+
+ /*
+ * let this quirk apply only to the current version of the
+ * northbridge, since future versions may change the behavior
+ */
+ if (!pci_match_id(affected_device, pdev))
+ return;
+
+ pci_bus_read_config_dword(pdev->bus,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
+ REG_TDP_RUNNING_AVERAGE, &val);
+ if ((val & 0xf) != 0xe)
+ return;
+
+ val &= ~0xf;
+ val |= 0x9;
+ pci_bus_write_config_dword(pdev->bus,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 5),
+ REG_TDP_RUNNING_AVERAGE, val);
+}
+
static void __devinit fam15h_power_init_data(struct pci_dev *f4,
struct fam15h_power_data *data)
{
@@ -155,6 +190,13 @@ static int __devinit fam15h_power_probe(struct pci_dev *pdev,
struct device *dev;
int err;
+ /*
+ * though we ignore every other northbridge, we still have to
+ * do the tweaking on _each_ node in MCM processors as the counters
+ * are working hand-in-hand
+ */
+ tweak_runavg_range(pdev);
+
if (!fam15h_power_is_internal_node0(pdev)) {
err = -ENODEV;
goto exit;
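
tweak_runavg_range() above rewrites only the low four bits of the running-average register, and only when they still hold the old recommendation. The read-modify-write step in isolation, with an invented register value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0x01a0005e;	/* pretend REG_TDP_RUNNING_AVERAGE read */

	/* Only fix up registers still carrying the old recommendation (0xE). */
	if ((val & 0xf) == 0xe) {
		val &= ~(uint32_t)0xf;	/* clear the 4-bit range field */
		val |= 0x9;		/* apply the updated recommendation */
	}
	printf("new value: 0x%08" PRIx32 "\n", val);
	return 0;
}
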
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index f086131..c811289b 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -324,7 +324,7 @@ static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
{
long ret;
ret = wait_event_timeout(pch_event,
- (adap->pch_event_flag != 0), msecs_to_jiffies(50));
+ (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
if (ret == 0) {
pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
@@ -1063,6 +1063,6 @@ module_exit(pch_pci_exit);
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>");
+MODULE_AUTHOR("Tomoya MORINAGA. <tomoya.rohm@gmail.com>");
module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 3d471d5..76b8af4 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -227,6 +227,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
return -EINVAL;
init_completion(&i2c->cmd_complete);
+ i2c->cmd_err = 0;
flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
@@ -252,6 +253,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
if (i2c->cmd_err == -ENXIO)
mxs_i2c_reset(i2c);
+ else
+ writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+ i2c->regs + MXS_I2C_QUEUECTRL_CLR);
dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
@@ -299,8 +303,6 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ))
/* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
i2c->cmd_err = -EIO;
- else
- i2c->cmd_err = 0;
is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) &
MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0;
@@ -384,8 +386,6 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev)
if (ret)
return -EBUSY;
- writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
- i2c->regs + MXS_I2C_QUEUECTRL_CLR);
writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 04be9f82..eb8ad53 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -546,8 +546,7 @@ static int i2c_pnx_controller_suspend(struct platform_device *pdev,
{
struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
- /* FIXME: shouldn't this be clk_disable? */
- clk_enable(alg_data->clk);
+ clk_disable(alg_data->clk);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index e978635..55e5ea6 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -516,6 +516,14 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
return 0;
+ /*
+ * The NACK interrupt is generated before the I2C controller generates
+ * the STOP condition on the bus, so wait for two clock periods before
+ * resetting the controller so that the STOP condition is delivered properly.
+ */
+ if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
+ udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
tegra_i2c_init(i2c_dev);
if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
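The delay added above is two SCL clock periods at the configured bus rate, rounded up to whole microseconds. A tiny self-contained check of that arithmetic, with DIV_ROUND_UP redefined locally so the sketch builds outside the kernel:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* mirrors the kernel macro */

int main(void)
{
	const unsigned int rates[] = { 100000, 400000 };	/* Hz */
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("%6u Hz bus -> udelay(%u)\n",
		       rates[i], DIV_ROUND_UP(2 * 1000000, rates[i]));
	/* prints 20 us for a 100 kHz bus and 5 us for a 400 kHz bus */
	return 0;
}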
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 426bb76..b0d0bc8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1854,6 +1854,8 @@ static bool generate_unmatched_resp(struct ib_mad_private *recv,
response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
response->mad.mad.mad_hdr.status =
cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
+ if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION;
return true;
} else {
@@ -1869,6 +1871,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
struct ib_mad_list_head *mad_list;
struct ib_mad_agent_private *mad_agent;
int port_num;
+ int ret = IB_MAD_RESULT_SUCCESS;
mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
qp_info = mad_list->mad_queue->qp_info;
@@ -1952,8 +1955,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
local:
/* Give driver "right of first refusal" on incoming MAD */
if (port_priv->device->process_mad) {
- int ret;
-
ret = port_priv->device->process_mad(port_priv->device, 0,
port_priv->port_num,
wc, &recv->grh,
@@ -1981,7 +1982,8 @@ local:
* or via recv_handler in ib_mad_complete_recv()
*/
recv = NULL;
- } else if (generate_unmatched_resp(recv, response)) {
+ } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
+ generate_unmatched_resp(recv, response)) {
agent_send_response(&response->mad.mad, &recv->grh, wc,
port_priv->device, port_num, qp_info->qp->qp_num);
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 669673e..b948b6d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -247,7 +247,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
NULL, NULL, in_mad, out_mad);
if (err)
- return err;
+ goto out;
/* Checking LinkSpeedActive for FDR-10 */
if (out_mad->data[15] & 0x1)
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 2d78779..7faf4a7 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -380,8 +380,7 @@ config INPUT_TWL4030_VIBRA
config INPUT_TWL6040_VIBRA
tristate "Support for TWL6040 Vibrator"
- depends on TWL4030_CORE
- select TWL6040_CORE
+ depends on TWL6040_CORE
select INPUT_FF_MEMLESS
help
This option enables support for TWL6040 Vibrator Driver.
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 45874fe..14e94f5 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -28,7 +28,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
-#include <linux/i2c/twl.h>
+#include <linux/input.h>
#include <linux/mfd/twl6040.h>
#include <linux/slab.h>
#include <linux/delay.h>
@@ -257,7 +257,7 @@ static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL);
static int __devinit twl6040_vibra_probe(struct platform_device *pdev)
{
- struct twl4030_vibra_data *pdata = pdev->dev.platform_data;
+ struct twl6040_vibra_data *pdata = pdev->dev.platform_data;
struct vibra_info *info;
int ret;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 8081a0a..a4b14a4 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -274,7 +274,8 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
static unsigned char param = 0xc8;
struct synaptics_data *priv = psmouse->private;
- if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
+ if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
+ SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
return 0;
if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 800243b..64ad702 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -35,7 +35,7 @@ static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b)
* NOTE: we reuse the platform_data structure of GPIO leds,
* but repurpose its "gpio" number as a PWM channel number.
*/
-static int __init pwmled_probe(struct platform_device *pdev)
+static int __devinit pwmled_probe(struct platform_device *pdev)
{
const struct gpio_led_platform_data *pdata;
struct pwmled *leds;
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index d8433f2..73973fd 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -112,7 +112,7 @@ err_free_addr:
return err;
}
-static void __devexit gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
+static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext)
{
int i;
@@ -294,7 +294,7 @@ static ssize_t netxbig_led_sata_show(struct device *dev,
static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);
-static void __devexit delete_netxbig_led(struct netxbig_led_data *led_dat)
+static void delete_netxbig_led(struct netxbig_led_data *led_dat)
{
if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 2f0a144..01cf89e 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -255,7 +255,7 @@ err_free_cmd:
return ret;
}
-static void __devexit delete_ns2_led(struct ns2_led_data *led_dat)
+static void delete_ns2_led(struct ns2_led_data *led_dat)
{
device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
led_classdev_unregister(&led_dat->cdev);
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 97e73e5..17e2b47 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1727,8 +1727,7 @@ int bitmap_create(struct mddev *mddev)
bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize)
- BITMAP_BLOCK_SHIFT);
- /* now that chunksize and chunkshift are set, we can use these macros */
- chunks = (blocks + bitmap->chunkshift - 1) >>
+ chunks = (blocks + (1 << bitmap->chunkshift) - 1) >>
bitmap->chunkshift;
pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
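The bitmap fix above replaces an expression that added the shift count with one that adds the chunk size, i.e. a standard ceiling division by a power of two. A short self-contained check; the chunk shift of 9 (512-block chunks) is just an example value:

#include <assert.h>

/* ceil(blocks / 2^shift), written the way the fixed driver code writes it */
static unsigned long chunks_for(unsigned long blocks, unsigned int shift)
{
	return (blocks + (1UL << shift) - 1) >> shift;
}

int main(void)
{
	assert(chunks_for(0, 9) == 0);
	assert(chunks_for(1, 9) == 1);
	assert(chunks_for(512, 9) == 1);
	assert(chunks_for(513, 9) == 2);

	/* the old "(blocks + shift - 1) >> shift" form undercounts here: */
	assert(((513 + 9 - 1) >> 9) == 1);
	return 0;
}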
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 55ca5ae..b44b0aba 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -101,9 +101,6 @@ typedef __u16 bitmap_counter_t;
#define BITMAP_BLOCK_SHIFT 9
-/* how many blocks per chunk? (this is variable) */
-#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT)
-
#endif
/*
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 1f23e04..08d9a20 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
{
struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1);
- if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN))
return;
spin_lock(&receiving_list_lock);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 922a338..754f38f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -718,8 +718,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
return 0;
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
- request_module("scsi_dh_%s", m->hw_handler_name);
- if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
+ if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
+ "scsi_dh_%s", m->hw_handler_name)) {
ti->error = "unknown hardware handler type";
ret = -EINVAL;
goto fail;
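As used here, try_then_request_module() evaluates its first argument and, only if that check fails, loads the named module and evaluates the check again. A self-contained sketch of the same control flow, with stub functions standing in for scsi_dh_handler_exist() and request_module(); the stubs are illustrative, not kernel code:

#include <stdbool.h>
#include <stdio.h>

static bool handler_loaded;

static bool handler_exists(const char *name)
{
	(void)name;
	return handler_loaded;
}

static void load_module(const char *name)
{
	printf("modprobe %s\n", name);
	handler_loaded = true;
}

/* Same shape as try_then_request_module(expr, fmt, ...): try first,
 * load the module only on failure, then try once more. */
static bool try_then_load(const char *name)
{
	if (handler_exists(name))
		return true;
	load_module(name);
	return handler_exists(name);
}

int main(void)
{
	printf("handler available: %d\n", try_then_load("scsi_dh_alua"));
	return 0;
}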
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b0ba524..68965e6 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -859,7 +859,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
int ret;
unsigned redundancy = 0;
struct raid_dev *dev;
- struct md_rdev *rdev, *freshest;
+ struct md_rdev *rdev, *tmp, *freshest;
struct mddev *mddev = &rs->md;
switch (rs->raid_type->level) {
@@ -877,7 +877,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
}
freshest = NULL;
- rdev_for_each(rdev, mddev) {
+ rdev_for_each_safe(rdev, tmp, mddev) {
if (!rdev->meta_bdev)
continue;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 213ae32..2fd87b5 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -279,8 +279,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
hlist_del(&cell->list);
- bio_list_add(inmates, cell->holder);
- bio_list_merge(inmates, &cell->bios);
+ if (inmates) {
+ bio_list_add(inmates, cell->holder);
+ bio_list_merge(inmates, &cell->bios);
+ }
mempool_free(cell, prison->cell_pool);
}
@@ -303,9 +305,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
*/
static void __cell_release_singleton(struct cell *cell, struct bio *bio)
{
- hlist_del(&cell->list);
BUG_ON(cell->holder != bio);
BUG_ON(!bio_list_empty(&cell->bios));
+
+ __cell_release(cell, NULL);
}
static void cell_release_singleton(struct cell *cell, struct bio *bio)
@@ -1177,6 +1180,7 @@ static void no_space(struct cell *cell)
static void process_discard(struct thin_c *tc, struct bio *bio)
{
int r;
+ unsigned long flags;
struct pool *pool = tc->pool;
struct cell *cell, *cell2;
struct cell_key key, key2;
@@ -1218,7 +1222,9 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
m->bio = bio;
if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+ spin_lock_irqsave(&pool->lock, flags);
list_add(&m->list, &pool->prepared_discards);
+ spin_unlock_irqrestore(&pool->lock, flags);
wake_worker(pool);
}
} else {
@@ -2626,8 +2632,10 @@ static int thin_endio(struct dm_target *ti,
if (h->all_io_entry) {
INIT_LIST_HEAD(&work);
ds_dec(h->all_io_entry, &work);
+ spin_lock_irqsave(&pool->lock, flags);
list_for_each_entry_safe(m, tmp, &work, list)
list_add(&m->list, &pool->prepared_discards);
+ spin_unlock_irqrestore(&pool->lock, flags);
}
mempool_free(h, pool->endio_hook_pool);
@@ -2759,6 +2767,6 @@ static void dm_thin_exit(void)
module_init(dm_thin_init);
module_exit(dm_thin_exit);
-MODULE_DESCRIPTION(DM_NAME "device-mapper thin provisioning target");
+MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b572e1e..477eb2e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7560,14 +7560,14 @@ void md_check_recovery(struct mddev *mddev)
* any transients in the value of "sync_action".
*/
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
- clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
/* Clear some bits that don't mean anything, but
* might be left set
*/
clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
- if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
goto unlock;
/* no recovery is running.
* remove any failed drives, then
@@ -8140,7 +8140,8 @@ static int md_notify_reboot(struct notifier_block *this,
for_each_mddev(mddev, tmp) {
if (mddev_trylock(mddev)) {
- __md_stop_writes(mddev);
+ if (mddev->pers)
+ __md_stop_writes(mddev);
mddev->safemode = 2;
mddev_unlock(mddev);
}
diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c
index 7f98984..eab2ea4 100644
--- a/drivers/media/common/tuners/xc5000.c
+++ b/drivers/media/common/tuners/xc5000.c
@@ -54,6 +54,7 @@ struct xc5000_priv {
struct list_head hybrid_tuner_instance_list;
u32 if_khz;
+ u32 xtal_khz;
u32 freq_hz;
u32 bandwidth;
u8 video_standard;
@@ -214,9 +215,9 @@ static const struct xc5000_fw_cfg xc5000a_1_6_114 = {
.size = 12401,
};
-static const struct xc5000_fw_cfg xc5000c_41_024_5_31875 = {
- .name = "dvb-fe-xc5000c-41.024.5-31875.fw",
- .size = 16503,
+static const struct xc5000_fw_cfg xc5000c_41_024_5 = {
+ .name = "dvb-fe-xc5000c-41.024.5.fw",
+ .size = 16497,
};
static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id)
@@ -226,7 +227,7 @@ static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id)
case XC5000A:
return &xc5000a_1_6_114;
case XC5000C:
- return &xc5000c_41_024_5_31875;
+ return &xc5000c_41_024_5;
}
}
@@ -572,6 +573,31 @@ static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz, int mode)
return found;
}
+static int xc_set_xtal(struct dvb_frontend *fe)
+{
+ struct xc5000_priv *priv = fe->tuner_priv;
+ int ret = XC_RESULT_SUCCESS;
+
+ switch (priv->chip_id) {
+ default:
+ case XC5000A:
+ /* 32.000 MHz xtal is default */
+ break;
+ case XC5000C:
+ switch (priv->xtal_khz) {
+ default:
+ case 32000:
+ /* 32.000 MHz xtal is default */
+ break;
+ case 31875:
+ /* 31.875 MHz xtal configuration */
+ ret = xc_write_reg(priv, 0x000f, 0x8081);
+ break;
+ }
+ break;
+ }
+ return ret;
+}
static int xc5000_fwupload(struct dvb_frontend *fe)
{
@@ -603,6 +629,8 @@ static int xc5000_fwupload(struct dvb_frontend *fe)
} else {
printk(KERN_INFO "xc5000: firmware uploading...\n");
ret = xc_load_i2c_sequence(fe, fw->data);
+ if (XC_RESULT_SUCCESS == ret)
+ ret = xc_set_xtal(fe);
printk(KERN_INFO "xc5000: firmware upload complete...\n");
}
@@ -1164,6 +1192,9 @@ struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe,
priv->if_khz = cfg->if_khz;
}
+ if (priv->xtal_khz == 0)
+ priv->xtal_khz = cfg->xtal_khz;
+
if (priv->radio_input == 0)
priv->radio_input = cfg->radio_input;
diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h
index 3396f8e..39a73bf 100644
--- a/drivers/media/common/tuners/xc5000.h
+++ b/drivers/media/common/tuners/xc5000.h
@@ -34,6 +34,7 @@ struct xc5000_config {
u8 i2c_address;
u32 if_khz;
u8 radio_input;
+ u32 xtal_khz;
int chip_id;
};
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 39696c6..0f64d71 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -1446,6 +1446,28 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
__func__);
return -EINVAL;
}
+ /*
+ * Get a delivery system that is compatible with DVBv3.
+ * NOTE: in order for this to work with applications like Kaffeine that
+ * use a DVBv5 call for DVB-S2 and a DVBv3 call to go back to
+ * DVB-S, drivers that support both should put the SYS_DVBS entry
+ * before the SYS_DVBS2 one; otherwise it won't switch back to DVB-S.
+ * The real fix is for userspace applications to stop using DVBv3
+ * and to stop relying on FE_SET_FRONTEND to switch the delivery
+ * system.
+ */
+ ncaps = 0;
+ while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) {
+ if (fe->ops.delsys[ncaps] == desired_system) {
+ delsys = desired_system;
+ break;
+ }
+ ncaps++;
+ }
+ if (delsys == SYS_UNDEFINED) {
+ dprintk("%s() Couldn't find a delivery system that matches %d\n",
+ __func__, desired_system);
+ }
} else {
/*
* This is a DVBv5 call. So, it likely knows the supported
@@ -1494,9 +1516,10 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
__func__);
return -EINVAL;
}
- c->delivery_system = delsys;
}
+ c->delivery_system = delsys;
+
/*
* The DVBv3 or DVBv5 call is requesting a different system. So,
* emulation is needed.
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c
index 36d1175..a414b1f 100644
--- a/drivers/media/dvb/frontends/drxk_hard.c
+++ b/drivers/media/dvb/frontends/drxk_hard.c
@@ -1520,8 +1520,10 @@ static int scu_command(struct drxk_state *state,
dprintk(1, "\n");
if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) ||
- ((resultLen > 0) && (result == NULL)))
- goto error;
+ ((resultLen > 0) && (result == NULL))) {
+ printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__);
+ return status;
+ }
mutex_lock(&state->mutex);
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index b09c5fa..af52658 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -1046,6 +1046,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
goto exit_unregister_led;
}
+ data->dev->driver_type = RC_DRIVER_IR_RAW;
data->dev->driver_name = WBCIR_NAME;
data->dev->input_name = WBCIR_NAME;
data->dev->input_phys = "wbcir/cir0";
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index f2479c5..ce1e7ba 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -492,7 +492,7 @@ config VIDEO_VS6624
config VIDEO_MT9M032
tristate "MT9M032 camera sensor support"
- depends on I2C && VIDEO_V4L2
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
select VIDEO_APTINA_PLL
---help---
This driver supports MT9M032 camera sensors from Aptina, monochrome
diff --git a/drivers/media/video/mt9m032.c b/drivers/media/video/mt9m032.c
index 7636672..645973c 100644
--- a/drivers/media/video/mt9m032.c
+++ b/drivers/media/video/mt9m032.c
@@ -392,10 +392,11 @@ static int mt9m032_set_pad_format(struct v4l2_subdev *subdev,
}
/* Scaling is not supported, the format is thus fixed. */
- ret = mt9m032_get_pad_format(subdev, fh, fmt);
+ fmt->format = *__mt9m032_get_pad_format(sensor, fh, fmt->which);
+ ret = 0;
done:
- mutex_lock(&sensor->lock);
+ mutex_unlock(&sensor->lock);
return ret;
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 29f463c..11e44386 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -268,10 +268,17 @@ config TWL6030_PWM
This is used to control charging LED brightness.
config TWL6040_CORE
- bool
- depends on TWL4030_CORE && GENERIC_HARDIRQS
+ bool "Support for TWL6040 audio codec"
+ depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
+ select REGMAP_I2C
default n
+ help
+ Say yes here if you want support for Texas Instruments TWL6040 audio
+ codec.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the
+ functionality of the device (audio, vibra).
config MFD_STMPE
bool "Support STMicroelectronics STMPE"
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 1895cf9..1582c3d 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -527,7 +527,9 @@ static void asic3_gpio_set(struct gpio_chip *chip,
static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- return (offset < ASIC3_NUM_GPIOS) ? IRQ_BOARD_START + offset : -ENXIO;
+ struct asic3 *asic = container_of(chip, struct asic3, gpio);
+
+ return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO;
}
static __init int asic3_gpio_probe(struct platform_device *pdev,
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 95a2e54..7e96bb2 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,7 +25,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <linux/gpio.h>
+#include <plat/cpu.h>
#include <plat/usb.h>
#include <linux/pm_runtime.h>
@@ -502,19 +502,6 @@ static void omap_usbhs_init(struct device *dev)
pm_runtime_get_sync(dev);
spin_lock_irqsave(&omap->lock, flags);
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
- GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
-
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
- GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
-
- /* Hold the PHY in RESET for enough time till DIR is high */
- udelay(10);
- }
-
omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
@@ -593,39 +580,10 @@ static void omap_usbhs_init(struct device *dev)
usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT);
}
- if (pdata->ehci_data->phy_reset) {
- /* Hold the PHY in RESET for enough time till
- * PHY is settled and ready
- */
- udelay(10);
-
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_set_value
- (pdata->ehci_data->reset_gpio_port[0], 1);
-
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_set_value
- (pdata->ehci_data->reset_gpio_port[1], 1);
- }
-
spin_unlock_irqrestore(&omap->lock, flags);
pm_runtime_put_sync(dev);
}
-static void omap_usbhs_deinit(struct device *dev)
-{
- struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
-
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_free(pdata->ehci_data->reset_gpio_port[0]);
-
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_free(pdata->ehci_data->reset_gpio_port[1]);
- }
-}
-
/**
* usbhs_omap_probe - initialize TI-based HCDs
@@ -860,7 +818,6 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
{
struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
- omap_usbhs_deinit(&pdev->dev);
iounmap(omap->tll_base);
iounmap(omap->uhh_base);
clk_put(omap->init_60m_fclk);
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 99ef944..44afae0 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -80,44 +80,6 @@ static struct mfd_cell rc5t583_subdevs[] = {
{.name = "rc5t583-key", }
};
-int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val)
-{
- struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
- return regmap_write(rc5t583->regmap, reg, val);
-}
-
-int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val)
-{
- struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
- unsigned int ival;
- int ret;
- ret = regmap_read(rc5t583->regmap, reg, &ival);
- if (!ret)
- *val = (uint8_t)ival;
- return ret;
-}
-
-int rc5t583_set_bits(struct device *dev, unsigned int reg,
- unsigned int bit_mask)
-{
- struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
- return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask);
-}
-
-int rc5t583_clear_bits(struct device *dev, unsigned int reg,
- unsigned int bit_mask)
-{
- struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
- return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0);
-}
-
-int rc5t583_update(struct device *dev, unsigned int reg,
- unsigned int val, unsigned int mask)
-{
- struct rc5t583 *rc5t583 = dev_get_drvdata(dev);
- return regmap_update_bits(rc5t583->regmap, reg, mask, val);
-}
-
static int __rc5t583_set_ext_pwrreq1_control(struct device *dev,
int id, int ext_pwr, int slots)
{
@@ -197,6 +159,7 @@ int rc5t583_ext_power_req_config(struct device *dev, int ds_id,
ds_id, ext_pwr_req);
return 0;
}
+EXPORT_SYMBOL(rc5t583_ext_power_req_config);
static int rc5t583_clear_ext_power_req(struct rc5t583 *rc5t583,
struct rc5t583_platform_data *pdata)
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index b2d8e51..2d6beda 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -30,7 +30,9 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/delay.h>
-#include <linux/i2c/twl.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/twl6040.h>
@@ -39,7 +41,7 @@
int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
{
int ret;
- u8 val = 0;
+ unsigned int val;
mutex_lock(&twl6040->io_mutex);
/* Vibra control registers from cache */
@@ -47,7 +49,7 @@ int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
reg == TWL6040_REG_VIBCTLR)) {
val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)];
} else {
- ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
+ ret = regmap_read(twl6040->regmap, reg, &val);
if (ret < 0) {
mutex_unlock(&twl6040->io_mutex);
return ret;
@@ -64,7 +66,7 @@ int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val)
int ret;
mutex_lock(&twl6040->io_mutex);
- ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
+ ret = regmap_write(twl6040->regmap, reg, val);
/* Cache the vibra control registers */
if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR)
twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val;
@@ -77,16 +79,9 @@ EXPORT_SYMBOL(twl6040_reg_write);
int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
{
int ret;
- u8 val;
mutex_lock(&twl6040->io_mutex);
- ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
- if (ret)
- goto out;
-
- val |= mask;
- ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
-out:
+ ret = regmap_update_bits(twl6040->regmap, reg, mask, mask);
mutex_unlock(&twl6040->io_mutex);
return ret;
}
@@ -95,16 +90,9 @@ EXPORT_SYMBOL(twl6040_set_bits);
int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask)
{
int ret;
- u8 val;
mutex_lock(&twl6040->io_mutex);
- ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg);
- if (ret)
- goto out;
-
- val &= ~mask;
- ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg);
-out:
+ ret = regmap_update_bits(twl6040->regmap, reg, mask, 0);
mutex_unlock(&twl6040->io_mutex);
return ret;
}
@@ -494,32 +482,58 @@ static struct resource twl6040_codec_rsrc[] = {
},
};
-static int __devinit twl6040_probe(struct platform_device *pdev)
+static bool twl6040_readable_reg(struct device *dev, unsigned int reg)
{
- struct twl4030_audio_data *pdata = pdev->dev.platform_data;
+ /* Register 0 is not readable */
+ if (!reg)
+ return false;
+ return true;
+}
+
+static struct regmap_config twl6040_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TWL6040_REG_STATUS, /* 0x2e */
+
+ .readable_reg = twl6040_readable_reg,
+};
+
+static int __devinit twl6040_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct twl6040_platform_data *pdata = client->dev.platform_data;
struct twl6040 *twl6040;
struct mfd_cell *cell = NULL;
int ret, children = 0;
if (!pdata) {
- dev_err(&pdev->dev, "Platform data is missing\n");
+ dev_err(&client->dev, "Platform data is missing\n");
return -EINVAL;
}
/* In order to operate correctly we need valid interrupt config */
- if (!pdata->naudint_irq || !pdata->irq_base) {
- dev_err(&pdev->dev, "Invalid IRQ configuration\n");
+ if (!client->irq || !pdata->irq_base) {
+ dev_err(&client->dev, "Invalid IRQ configuration\n");
return -EINVAL;
}
- twl6040 = kzalloc(sizeof(struct twl6040), GFP_KERNEL);
- if (!twl6040)
- return -ENOMEM;
+ twl6040 = devm_kzalloc(&client->dev, sizeof(struct twl6040),
+ GFP_KERNEL);
+ if (!twl6040) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config);
+ if (IS_ERR(twl6040->regmap)) {
+ ret = PTR_ERR(twl6040->regmap);
+ goto err;
+ }
- platform_set_drvdata(pdev, twl6040);
+ i2c_set_clientdata(client, twl6040);
- twl6040->dev = &pdev->dev;
- twl6040->irq = pdata->naudint_irq;
+ twl6040->dev = &client->dev;
+ twl6040->irq = client->irq;
twl6040->irq_base = pdata->irq_base;
mutex_init(&twl6040->mutex);
@@ -588,12 +602,12 @@ static int __devinit twl6040_probe(struct platform_device *pdev)
}
if (children) {
- ret = mfd_add_devices(&pdev->dev, pdev->id, twl6040->cells,
+ ret = mfd_add_devices(&client->dev, -1, twl6040->cells,
children, NULL, 0);
if (ret)
goto mfd_err;
} else {
- dev_err(&pdev->dev, "No platform data found for children\n");
+ dev_err(&client->dev, "No platform data found for children\n");
ret = -ENODEV;
goto mfd_err;
}
@@ -608,14 +622,15 @@ gpio2_err:
if (gpio_is_valid(twl6040->audpwron))
gpio_free(twl6040->audpwron);
gpio1_err:
- platform_set_drvdata(pdev, NULL);
- kfree(twl6040);
+ i2c_set_clientdata(client, NULL);
+ regmap_exit(twl6040->regmap);
+err:
return ret;
}
-static int __devexit twl6040_remove(struct platform_device *pdev)
+static int __devexit twl6040_remove(struct i2c_client *client)
{
- struct twl6040 *twl6040 = platform_get_drvdata(pdev);
+ struct twl6040 *twl6040 = i2c_get_clientdata(client);
if (twl6040->power_count)
twl6040_power(twl6040, 0);
@@ -626,23 +641,30 @@ static int __devexit twl6040_remove(struct platform_device *pdev)
free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
twl6040_irq_exit(twl6040);
- mfd_remove_devices(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
- kfree(twl6040);
+ mfd_remove_devices(&client->dev);
+ i2c_set_clientdata(client, NULL);
+ regmap_exit(twl6040->regmap);
return 0;
}
-static struct platform_driver twl6040_driver = {
+static const struct i2c_device_id twl6040_i2c_id[] = {
+ { "twl6040", 0, },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, twl6040_i2c_id);
+
+static struct i2c_driver twl6040_driver = {
+ .driver = {
+ .name = "twl6040",
+ .owner = THIS_MODULE,
+ },
.probe = twl6040_probe,
.remove = __devexit_p(twl6040_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = "twl6040",
- },
+ .id_table = twl6040_i2c_id,
};
-module_platform_driver(twl6040_driver);
+module_i2c_driver(twl6040_driver);
MODULE_DESCRIPTION("TWL6040 MFD");
MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b180965..dabec55 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -873,7 +873,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- unsigned int from, nr, arg;
+ unsigned int from, nr, arg, trim_arg, erase_arg;
int err = 0, type = MMC_BLK_SECDISCARD;
if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
@@ -881,20 +881,26 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
goto out;
}
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
/* The sanitize operation is supported at v4.5 only */
if (mmc_can_sanitize(card)) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_SANITIZE_START, 1, 0);
- goto out;
+ erase_arg = MMC_ERASE_ARG;
+ trim_arg = MMC_TRIM_ARG;
+ } else {
+ erase_arg = MMC_SECURE_ERASE_ARG;
+ trim_arg = MMC_SECURE_TRIM1_ARG;
}
- from = blk_rq_pos(req);
- nr = blk_rq_sectors(req);
-
- if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
- arg = MMC_SECURE_TRIM1_ARG;
- else
- arg = MMC_SECURE_ERASE_ARG;
+ if (mmc_erase_group_aligned(card, from, nr))
+ arg = erase_arg;
+ else if (mmc_can_trim(card))
+ arg = trim_arg;
+ else {
+ err = -EINVAL;
+ goto out;
+ }
retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -904,25 +910,41 @@ retry:
INAND_CMD38_ARG_SECERASE,
0);
if (err)
- goto out;
+ goto out_retry;
}
+
err = mmc_erase(card, from, nr, arg);
- if (!err && arg == MMC_SECURE_TRIM1_ARG) {
+ if (err == -EIO)
+ goto out_retry;
+ if (err)
+ goto out;
+
+ if (arg == MMC_SECURE_TRIM1_ARG) {
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
INAND_CMD38_ARG_SECTRIM2,
0);
if (err)
- goto out;
+ goto out_retry;
}
+
err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
+ if (err == -EIO)
+ goto out_retry;
+ if (err)
+ goto out;
}
-out:
- if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+
+ if (mmc_can_sanitize(card))
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_SANITIZE_START, 1, 0);
+out_retry:
+ if (err && !mmc_blk_reset(md, card->host, type))
goto retry;
if (!err)
mmc_blk_reset_success(md, type);
+out:
spin_lock_irq(&md->lock);
__blk_end_request(req, err, blk_rq_bytes(req));
spin_unlock_irq(&md->lock);
@@ -1802,7 +1824,7 @@ static void mmc_blk_remove(struct mmc_card *card)
}
#ifdef CONFIG_PM
-static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
+static int mmc_blk_suspend(struct mmc_card *card)
{
struct mmc_blk_data *part_md;
struct mmc_blk_data *md = mmc_get_drvdata(card);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 2517547..996f8e3 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -139,7 +139,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
q->limits.max_discard_sectors = max_discard;
- if (card->erased_byte == 0)
+ if (card->erased_byte == 0 && !mmc_can_discard(card))
q->limits.discard_zeroes_data = 1;
q->limits.discard_granularity = card->pref_erase << 9;
/* granularity must not be greater than max. discard */
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 3f60606..c60cee9 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,14 +122,14 @@ static int mmc_bus_remove(struct device *dev)
return 0;
}
-static int mmc_bus_suspend(struct device *dev, pm_message_t state)
+static int mmc_bus_suspend(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = mmc_dev_to_card(dev);
int ret = 0;
if (dev->driver && drv->suspend)
- ret = drv->suspend(card, state);
+ ret = drv->suspend(card);
return ret;
}
@@ -165,20 +165,14 @@ static int mmc_runtime_idle(struct device *dev)
return pm_runtime_suspend(dev);
}
+#endif /* !CONFIG_PM_RUNTIME */
+
static const struct dev_pm_ops mmc_bus_pm_ops = {
- .runtime_suspend = mmc_runtime_suspend,
- .runtime_resume = mmc_runtime_resume,
- .runtime_idle = mmc_runtime_idle,
+ SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume,
+ mmc_runtime_idle)
+ SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)
};
-#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
-
-#else /* !CONFIG_PM_RUNTIME */
-
-#define MMC_PM_OPS_PTR NULL
-
-#endif /* !CONFIG_PM_RUNTIME */
-
static struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_attrs = mmc_dev_attrs,
@@ -186,9 +180,7 @@ static struct bus_type mmc_bus_type = {
.uevent = mmc_bus_uevent,
.probe = mmc_bus_probe,
.remove = mmc_bus_remove,
- .suspend = mmc_bus_suspend,
- .resume = mmc_bus_resume,
- .pm = MMC_PM_OPS_PTR,
+ .pm = &mmc_bus_pm_ops,
};
int mmc_register_bus(void)
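The bus.c change above drops the legacy suspend/resume members in favour of a dev_pm_ops table. A minimal sketch of that pattern, with placeholder callback bodies; the SET_* macros expand to nothing when the corresponding CONFIG_PM options are off, which is what removes the old #ifdef/#define dance:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_suspend(struct device *dev)         { return 0; }
static int foo_resume(struct device *dev)          { return 0; }
static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev)  { return 0; }

static int foo_runtime_idle(struct device *dev)
{
	return pm_runtime_suspend(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
			   foo_runtime_idle)
};

/* hooked up from e.g. a struct bus_type or struct device_driver:
 *	.pm = &foo_pm_ops,
 */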
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 29de31e..2c14be7 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -12,6 +12,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
+#include <linux/mmc/cd-gpio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7474c47..ba821fe 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1409,7 +1409,10 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
{
unsigned int erase_timeout;
- if (card->ext_csd.erase_group_def & 1) {
+ if (arg == MMC_DISCARD_ARG ||
+ (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
+ erase_timeout = card->ext_csd.trim_timeout;
+ } else if (card->ext_csd.erase_group_def & 1) {
/* High Capacity Erase Group Size uses HC timeouts */
if (arg == MMC_TRIM_ARG)
erase_timeout = card->ext_csd.trim_timeout;
@@ -1681,8 +1684,6 @@ int mmc_can_trim(struct mmc_card *card)
{
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
return 1;
- if (mmc_can_discard(card))
- return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_trim);
@@ -1701,6 +1702,8 @@ EXPORT_SYMBOL(mmc_can_discard);
int mmc_can_sanitize(struct mmc_card *card)
{
+ if (!mmc_can_trim(card) && !mmc_can_erase(card))
+ return 0;
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
return 1;
return 0;
@@ -2235,6 +2238,7 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
mmc_card_is_removable(host))
return err;
+ mmc_claim_host(host);
if (card && mmc_card_mmc(card) &&
(card->ext_csd.cache_size > 0)) {
enable = !!enable;
@@ -2252,6 +2256,7 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
card->ext_csd.cache_ctrl = enable;
}
}
+ mmc_release_host(host);
return err;
}
@@ -2269,49 +2274,32 @@ int mmc_suspend_host(struct mmc_host *host)
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
- if (mmc_try_claim_host(host)) {
- err = mmc_cache_ctrl(host, 0);
- mmc_release_host(host);
- } else {
- err = -EBUSY;
- }
+ err = mmc_cache_ctrl(host, 0);
if (err)
goto out;
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
- /*
- * A long response time is not acceptable for device drivers
- * when doing suspend. Prevent mmc_claim_host in the suspend
- * sequence, to potentially wait "forever" by trying to
- * pre-claim the host.
- */
- if (mmc_try_claim_host(host)) {
- if (host->bus_ops->suspend) {
- err = host->bus_ops->suspend(host);
- }
- mmc_release_host(host);
+ if (host->bus_ops->suspend)
+ err = host->bus_ops->suspend(host);
- if (err == -ENOSYS || !host->bus_ops->resume) {
- /*
- * We simply "remove" the card in this case.
- * It will be redetected on resume. (Calling
- * bus_ops->remove() with a claimed host can
- * deadlock.)
- */
- if (host->bus_ops->remove)
- host->bus_ops->remove(host);
- mmc_claim_host(host);
- mmc_detach_bus(host);
- mmc_power_off(host);
- mmc_release_host(host);
- host->pm_flags = 0;
- err = 0;
- }
- } else {
- err = -EBUSY;
+ if (err == -ENOSYS || !host->bus_ops->resume) {
+ /*
+ * We simply "remove" the card in this case.
+ * It will be redetected on resume. (Calling
+ * bus_ops->remove() with a claimed host can
+ * deadlock.)
+ */
+ if (host->bus_ops->remove)
+ host->bus_ops->remove(host);
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ host->pm_flags = 0;
+ err = 0;
}
}
mmc_bus_put(host);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index bf3c9b4..ab3fc46 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -526,8 +526,10 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
return -ENODEV;
sg_len = dw_mci_pre_dma_transfer(host, data, 0);
- if (sg_len < 0)
+ if (sg_len < 0) {
+ host->dma_ops->stop(host);
return sg_len;
+ }
host->using_dma = 1;
@@ -1879,7 +1881,8 @@ static void dw_mci_init_dma(struct dw_mci *host)
if (!host->dma_ops)
goto no_dma;
- if (host->dma_ops->init) {
+ if (host->dma_ops->init && host->dma_ops->start &&
+ host->dma_ops->stop && host->dma_ops->cleanup) {
if (host->dma_ops->init(host)) {
dev_err(&host->dev, "%s: Unable to initialize "
"DMA Controller.\n", __func__);
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index b0f2ef9..e3f5af9 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -363,6 +363,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
goto out;
dmaengine_submit(desc);
+ dma_async_issue_pending(host->dmach);
return;
out:
@@ -403,6 +404,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
goto out;
dmaengine_submit(desc);
+ dma_async_issue_pending(host->dmach);
return;
out:
@@ -531,6 +533,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
goto out;
dmaengine_submit(desc);
+ dma_async_issue_pending(host->dmach);
return;
out:
dev_warn(mmc_dev(host->mmc),
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 5c2b1c1..56d4499 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -249,7 +249,7 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,
* the pbias cell programming support is still missing when
* booting with Device tree
*/
- if (of_have_populated_dt() && !vdd)
+ if (dev->of_node && !vdd)
return 0;
if (mmc_slot(host).before_set_reg)
@@ -1549,7 +1549,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* can't be allowed when booting with device
* tree.
*/
- (!of_have_populated_dt())) {
+ !host->dev->of_node) {
/*
* The mmc_select_voltage fn of the core does
* not seem to set the power_mode to
@@ -1741,7 +1741,7 @@ static const struct of_device_id omap_mmc_of_match[] = {
.data = &omap4_reg_offset,
},
{},
-}
+};
MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 6193a0d..8abdaf6 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -467,8 +467,7 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
clk_prepare_enable(clk);
pltfm_host->clk = clk;
- if (!is_imx25_esdhc(imx_data))
- host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data))
/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 9aa77f3..ccefdeb 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -147,7 +147,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
u32 present, irqs;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
- !mmc_card_is_removable(host->mmc))
+ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
return;
present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 75b1dde..9ec51ce 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -266,6 +266,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this,
desc->callback = dma_irq_callback;
desc->callback_param = this;
dmaengine_submit(desc);
+ dma_async_issue_pending(get_dma_chan(this));
/* Wait for the interrupt from the DMA block. */
err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 25197b6..b8b4c7b 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -89,16 +89,16 @@ static int __init arcrimi_probe(struct net_device *dev)
BUGLVL(D_NORMAL) printk(VERSION);
BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n");
- BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n",
+ BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n",
dev->dev_addr[0], dev->mem_start, dev->irq);
if (dev->mem_start <= 0 || dev->irq <= 0) {
- BUGMSG(D_NORMAL, "No autoprobe for RIM I; you "
+ BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you "
"must specify the shmem and irq!\n");
return -ENODEV;
}
if (dev->dev_addr[0] == 0) {
- BUGMSG(D_NORMAL, "You need to specify your card's station "
+ BUGLVL(D_NORMAL) printk("You need to specify your card's station "
"ID!\n");
return -ENODEV;
}
@@ -109,7 +109,7 @@ static int __init arcrimi_probe(struct net_device *dev)
* will be taken.
*/
if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) {
- BUGMSG(D_NORMAL, "Card memory already allocated\n");
+ BUGLVL(D_NORMAL) printk("Card memory already allocated\n");
return -ENODEV;
}
return arcrimi_found(dev);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 793b001..3463b46 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2173,9 +2173,10 @@ re_arm:
* received frames (loopback). Since only the payload is given to this
* function, it checks for loopback.
*/
-static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
+static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length)
{
struct port *port;
+ int ret = RX_HANDLER_ANOTHER;
if (length >= sizeof(struct lacpdu)) {
@@ -2184,11 +2185,12 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
if (!port->slave) {
pr_warning("%s: Warning: port of slave %s is uninitialized\n",
slave->dev->name, slave->dev->master->name);
- return;
+ return ret;
}
switch (lacpdu->subtype) {
case AD_TYPE_LACPDU:
+ ret = RX_HANDLER_CONSUMED;
pr_debug("Received LACPDU on port %d\n",
port->actor_port_number);
/* Protect against concurrent state machines */
@@ -2198,6 +2200,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
break;
case AD_TYPE_MARKER:
+ ret = RX_HANDLER_CONSUMED;
// No need to convert fields to Little Endian since we don't use the marker's fields.
switch (((struct bond_marker *)lacpdu)->tlv_type) {
@@ -2219,6 +2222,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
}
}
}
+ return ret;
}
/**
@@ -2456,18 +2460,20 @@ out:
return NETDEV_TX_OK;
}
-void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
+ int ret = RX_HANDLER_ANOTHER;
if (skb->protocol != PKT_TYPE_LACPDU)
- return;
+ return ret;
if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
- return;
+ return ret;
read_lock(&bond->lock);
- bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
+ ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
read_unlock(&bond->lock);
+ return ret;
}
/*
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 235b2cc..5ee7e3c 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -274,7 +274,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
void bond_3ad_handle_link_change(struct slave *slave, char link);
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 62d2409..bc13b3d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1444,8 +1444,9 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
struct sk_buff *skb = *pskb;
struct slave *slave;
struct bonding *bond;
- void (*recv_probe)(struct sk_buff *, struct bonding *,
+ int (*recv_probe)(struct sk_buff *, struct bonding *,
struct slave *);
+ int ret = RX_HANDLER_ANOTHER;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
@@ -1464,8 +1465,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (likely(nskb)) {
- recv_probe(nskb, bond, slave);
+ ret = recv_probe(nskb, bond, slave);
dev_kfree_skb(nskb);
+ if (ret == RX_HANDLER_CONSUMED) {
+ consume_skb(skb);
+ return ret;
+ }
}
}
@@ -1487,7 +1492,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
}
- return RX_HANDLER_ANOTHER;
+ return ret;
}
/* enslave device <slave> to bond device <master> */
@@ -2723,7 +2728,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
}
}
-static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
+static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arphdr *arp;
@@ -2731,7 +2736,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
__be32 sip, tip;
if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
- return;
+ return RX_HANDLER_ANOTHER;
read_lock(&bond->lock);
@@ -2776,6 +2781,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
out_unlock:
read_unlock(&bond->lock);
+ return RX_HANDLER_ANOTHER;
}
/*
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 9a66e2a..9c1c8cd 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -744,14 +744,14 @@ static void cfhsi_wake_up(struct work_struct *work)
size_t fifo_occupancy = 0;
/* Wakeup timeout */
- dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
+ dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
__func__);
/* Check FIFO to check if modem has sent something. */
WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
&fifo_occupancy));
- dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
+ dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
__func__, (unsigned) fifo_occupancy);
/* Check if we misssed the interrupt. */
@@ -1210,7 +1210,7 @@ int cfhsi_probe(struct platform_device *pdev)
static void cfhsi_shutdown(struct cfhsi *cfhsi)
{
- u8 *tx_buf, *rx_buf;
+ u8 *tx_buf, *rx_buf, *flip_buf;
/* Stop TXing */
netif_tx_stop_all_queues(cfhsi->ndev);
@@ -1234,7 +1234,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
/* Store bufferes: will be freed later. */
tx_buf = cfhsi->tx_buf;
rx_buf = cfhsi->rx_buf;
-
+ flip_buf = cfhsi->rx_flip_buf;
/* Flush transmit queues. */
cfhsi_abort_tx(cfhsi);
@@ -1247,6 +1247,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
/* Free buffers. */
kfree(tx_buf);
kfree(rx_buf);
+ kfree(flip_buf);
}
int cfhsi_remove(struct platform_device *pdev)
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 5234586..629c4ba 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -875,6 +875,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
PCAN_USBPRO_INFO_FW,
&fi, sizeof(fi));
if (err) {
+ kfree(usb_if);
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
pcan_usb_pro.name, err);
@@ -885,6 +886,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
PCAN_USBPRO_INFO_BL,
&bi, sizeof(bi));
if (err) {
+ kfree(usb_if);
dev_err(dev->netdev->dev.parent,
"unable to read %s bootloader info (err %d)\n",
pcan_usb_pro.name, err);
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index d5c6d92..442d91a 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -107,14 +107,14 @@ static int dummy_dev_init(struct net_device *dev)
return 0;
}
-static void dummy_dev_free(struct net_device *dev)
+static void dummy_dev_uninit(struct net_device *dev)
{
free_percpu(dev->dstats);
- free_netdev(dev);
}
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
+ .ndo_uninit = dummy_dev_uninit,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
@@ -128,7 +128,7 @@ static void dummy_setup(struct net_device *dev)
/* Initialize the device structure. */
dev->netdev_ops = &dummy_netdev_ops;
- dev->destructor = dummy_dev_free;
+ dev->destructor = free_netdev;
/* Fill in device structure with ethernet-generic values. */
dev->tx_queue_len = 0;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 40ac414..c926857 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2476,7 +2476,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
"pcie phy link down %x\n", status);
if (netif_running(adapter->netdev)) { /* reset MAC */
iowrite32(0, adapter->hw.hw_addr + REG_IMR);
- schedule_work(&adapter->pcie_dma_to_rst_task);
+ schedule_work(&adapter->reset_dev_task);
return IRQ_HANDLED;
}
}
@@ -2488,7 +2488,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
"pcie DMA r/w error (status = 0x%x)\n",
status);
iowrite32(0, adapter->hw.hw_addr + REG_IMR);
- schedule_work(&adapter->pcie_dma_to_rst_task);
+ schedule_work(&adapter->reset_dev_task);
return IRQ_HANDLED;
}
@@ -2633,10 +2633,10 @@ static void atl1_down(struct atl1_adapter *adapter)
atl1_clean_rx_ring(adapter);
}
-static void atl1_tx_timeout_task(struct work_struct *work)
+static void atl1_reset_dev_task(struct work_struct *work)
{
struct atl1_adapter *adapter =
- container_of(work, struct atl1_adapter, tx_timeout_task);
+ container_of(work, struct atl1_adapter, reset_dev_task);
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
@@ -3038,12 +3038,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
(unsigned long)adapter);
adapter->phy_timer_pending = false;
- INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
+ INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
- INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
-
err = register_netdev(netdev);
if (err)
goto err_common;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index 109d6da..e04bf4d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -758,9 +758,8 @@ struct atl1_adapter {
u16 link_speed;
u16 link_duplex;
spinlock_t lock;
- struct work_struct tx_timeout_task;
+ struct work_struct reset_dev_task;
struct work_struct link_chg_task;
- struct work_struct pcie_dma_to_rst_task;
struct timer_list phy_config_timer;
bool phy_timer_pending;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 3cd8837..c9e9dc5 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -194,7 +194,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
- schedule_work(&adapter->tx_timeout_task);
+ schedule_work(&adapter->reset_dev_task);
}
/*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index ad95324..64392ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -942,6 +942,12 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
DCBX_E3B0_MAX_NUM_COS_PORT0;
+ if (pri >= max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter Illegal strict priority\n");
+ return -EINVAL;
+ }
+
if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter There can't be two COS's with "
@@ -949,12 +955,6 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
return -EINVAL;
}
- if (pri > max_num_of_cos) {
- DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
- "parameter Illegal strict priority\n");
- return -EINVAL;
- }
-
sp_pri_to_cos[pri] = cos_entry;
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e077d25..6af3101 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9122,13 +9122,34 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
return bnx2x_prev_mcp_done(bp);
}
+/* A previous driver DMAE transaction may have occurred when the pre-boot stage
+ * ended and boot began, or when a kdump kernel was loaded. Either case would
+ * invalidate the addresses of the transaction, resulting in the was-error bit
+ * being set in the PCI block and causing all hw-to-host pcie transactions to
+ * time out. If this happened we want to clear the interrupt which detected
+ * this from the pglueb, as well as the was-done bit
+ */
+static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+ BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+ }
+}
+
static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
{
int time_counter = 10;
u32 rc, fw, hw_lock_reg, hw_lock_val;
BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
- /* Release previously held locks */
+ /* clear hw from errors which may have resulted from an interrupted
+ * dmae transaction.
+ */
+ bnx2x_prev_interrupted_dmae(bp);
+
+ /* Release previously held locks */
hw_lock_reg = (BP_FUNC(bp) <= 5) ?
(MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
(MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 062ac33..ceeab8e 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -879,8 +879,13 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
if (sblk->status & SD_STATUS_LINK_CHG)
work_exists = 1;
}
- /* check for RX/TX work to do */
- if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+
+ /* check for TX work to do */
+ if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
+ work_exists = 1;
+
+ /* check for RX work to do */
+ if (tnapi->rx_rcb_prod_idx &&
*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
work_exists = 1;
@@ -6124,6 +6129,9 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
return work_done;
}
+ if (!tnapi->rx_rcb_prod_idx)
+ return work_done;
+
/* run RX thread, within the bounds set by NAPI.
* All RX "locking" is done by ensuring outside
* code synchronizes with tg3->napi.poll()
@@ -7567,6 +7575,12 @@ static int tg3_alloc_consistent(struct tg3 *tp)
*/
switch (i) {
default:
+ if (tg3_flag(tp, ENABLE_RSS)) {
+ tnapi->rx_rcb_prod_idx = NULL;
+ break;
+ }
+ /* Fall through */
+ case 1:
tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
break;
case 2:
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 63bfdd1..abb6ce7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1150,6 +1150,48 @@ release_tpsram:
}
/**
+ * t3_synchronize_rx - wait for current Rx processing on a port to complete
+ * @adap: the adapter
+ * @p: the port
+ *
+ * Ensures that current Rx processing on any of the queues associated with
+ * the given port completes before returning. We do this by acquiring and
+ * releasing the locks of the response queues associated with the port.
+ */
+static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
+{
+ int i;
+
+ for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
+ struct sge_rspq *q = &adap->sge.qs[i].rspq;
+
+ spin_lock_irq(&q->lock);
+ spin_unlock_irq(&q->lock);
+ }
+}
+
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (adapter->params.rev > 0) {
+ t3_set_vlan_accel(adapter, 1 << pi->port_id,
+ features & NETIF_F_HW_VLAN_RX);
+ } else {
+ /* single control for all ports */
+ unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
+
+ for_each_port(adapter, i)
+ have_vlans |=
+ adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
+
+ t3_set_vlan_accel(adapter, 1, have_vlans);
+ }
+ t3_synchronize_rx(adapter, pi);
+}
+
+/**
* cxgb_up - enable the adapter
* @adapter: adapter being enabled
*
@@ -1161,7 +1203,7 @@ release_tpsram:
*/
static int cxgb_up(struct adapter *adap)
{
- int err;
+ int i, err;
if (!(adap->flags & FULL_INIT_DONE)) {
err = t3_check_fw_version(adap);
@@ -1198,6 +1240,9 @@ static int cxgb_up(struct adapter *adap)
if (err)
goto out;
+ for_each_port(adap, i)
+ cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
+
setup_rss(adap);
if (!(adap->flags & NAPI_INIT))
init_napi(adap);
@@ -2508,48 +2553,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-/**
- * t3_synchronize_rx - wait for current Rx processing on a port to complete
- * @adap: the adapter
- * @p: the port
- *
- * Ensures that current Rx processing on any of the queues associated with
- * the given port completes before returning. We do this by acquiring and
- * releasing the locks of the response queues associated with the port.
- */
-static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
-{
- int i;
-
- for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
- struct sge_rspq *q = &adap->sge.qs[i].rspq;
-
- spin_lock_irq(&q->lock);
- spin_unlock_irq(&q->lock);
- }
-}
-
-static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
-{
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
-
- if (adapter->params.rev > 0) {
- t3_set_vlan_accel(adapter, 1 << pi->port_id,
- features & NETIF_F_HW_VLAN_RX);
- } else {
- /* single control for all ports */
- unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX;
-
- for_each_port(adapter, i)
- have_vlans |=
- adapter->port[i]->features & NETIF_F_HW_VLAN_RX;
-
- t3_set_vlan_accel(adapter, 1, have_vlans);
- }
- t3_synchronize_rx(adapter, pi);
-}
-
static netdev_features_t cxgb_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -3353,9 +3356,6 @@ static int __devinit init_one(struct pci_dev *pdev,
err = sysfs_create_group(&adapter->port[0]->dev.kobj,
&cxgb3_attr_group);
- for_each_port(adapter, i)
- cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features);
-
print_port_info(adapter, ai);
return 0;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index b2dc2c8..2e09edb 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1259,55 +1259,21 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
int phy_addr;
struct netdev_private *np = netdev_priv(dev);
- struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
-
- struct netdev_desc *desc;
- int i;
+ struct mii_ioctl_data *miidata = if_mii(rq);
phy_addr = np->phy_addr;
switch (cmd) {
- case SIOCDEVPRIVATE:
- break;
-
- case SIOCDEVPRIVATE + 1:
- miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
+ case SIOCGMIIPHY:
+ miidata->phy_id = phy_addr;
break;
- case SIOCDEVPRIVATE + 2:
- mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
+ case SIOCGMIIREG:
+ miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
break;
- case SIOCDEVPRIVATE + 3:
- break;
- case SIOCDEVPRIVATE + 4:
- break;
- case SIOCDEVPRIVATE + 5:
- netif_stop_queue (dev);
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
break;
- case SIOCDEVPRIVATE + 6:
- netif_wake_queue (dev);
- break;
- case SIOCDEVPRIVATE + 7:
- printk
- ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
- netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
- np->old_rx);
- break;
- case SIOCDEVPRIVATE + 8:
- printk("TX ring:\n");
- for (i = 0; i < TX_RING_SIZE; i++) {
- desc = &np->tx_ring[i];
- printk
- ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
- i,
- (u32) (np->tx_ring_dma + i * sizeof (*desc)),
- (u32)le64_to_cpu(desc->next_desc),
- (u32)le64_to_cpu(desc->status),
- (u32)(le64_to_cpu(desc->fraginfo) >> 32),
- (u32)le64_to_cpu(desc->fraginfo));
- printk ("\n");
- }
- printk ("\n");
- break;
-
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index ba0adca..30c2da3 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -365,13 +365,6 @@ struct ioctl_data {
char *data;
};
-struct mii_data {
- __u16 reserved;
- __u16 reg_num;
- __u16 in_value;
- __u16 out_value;
-};
-
/* The Rx and Tx buffer descriptors. */
struct netdev_desc {
__le64 next_desc;
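
The rio_ioctl() rewrite above drops the driver-private SIOCDEVPRIVATE commands and the home-grown struct mii_data in favour of the standard MII ioctls and struct mii_ioctl_data. As an aside (not part of the patch), this is roughly how userspace reaches those ioctls; the interface name "eth0" is an assumption:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* the MII data lives in the ifr_ifru union, mii-tool style */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {        /* fills mii->phy_id */
		perror("SIOCGMIIPHY");
		return 1;
	}

	mii->reg_num = MII_BMSR;                       /* basic mode status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {
		perror("SIOCGMIIREG");
		return 1;
	}

	printf("PHY %u BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
	close(fd);
	return 0;
}
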
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 17a46e7..9ac14f8 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -116,10 +116,10 @@ static struct ucc_geth_info ugeth_primary_info = {
.maxGroupAddrInHash = 4,
.maxIndAddrInHash = 4,
.prel = 7,
- .maxFrameLength = 1518,
+ .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */
.minFrameLength = 64,
- .maxD1Length = 1520,
- .maxD2Length = 1520,
+ .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */
+ .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */
.vlantype = 0x8100,
.ecamptr = ((uint32_t) NULL),
.eventRegMask = UCCE_OTHER,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 2e395a2..f71b3e7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -877,7 +877,7 @@ struct ucc_geth_hardware_statistics {
/* Driver definitions */
#define TX_BD_RING_LEN 0x10
-#define RX_BD_RING_LEN 0x10
+#define RX_BD_RING_LEN 0x20
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3516e17..f4d2da0 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -290,16 +290,18 @@ static void ehea_update_bcmc_registrations(void)
arr[i].adh = adapter->handle;
arr[i].port_id = port->logical_port_id;
- arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
- EHEA_BCMC_MULTICAST |
+ arr[i].reg_type = EHEA_BCMC_MULTICAST |
EHEA_BCMC_UNTAGGED;
+ if (mc_entry->macaddr == 0)
+ arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
arr[i++].macaddr = mc_entry->macaddr;
arr[i].adh = adapter->handle;
arr[i].port_id = port->logical_port_id;
- arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
- EHEA_BCMC_MULTICAST |
+ arr[i].reg_type = EHEA_BCMC_MULTICAST |
EHEA_BCMC_VLANID_ALL;
+ if (mc_entry->macaddr == 0)
+ arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
arr[i++].macaddr = mc_entry->macaddr;
num_registrations -= 2;
}
@@ -1838,8 +1840,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
u64 hret;
u8 reg_type;
- reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
- | EHEA_BCMC_UNTAGGED;
+ reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
+ if (mc_mac_addr == 0)
+ reg_type |= EHEA_BCMC_SCOPE_ALL;
hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
port->logical_port_id,
@@ -1847,8 +1850,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
if (hret)
goto out;
- reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
- | EHEA_BCMC_VLANID_ALL;
+ reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
+ if (mc_mac_addr == 0)
+ reg_type |= EHEA_BCMC_SCOPE_ALL;
hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
port->logical_port_id,
@@ -1898,7 +1902,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
netdev_err(dev,
"failed enabling IFF_ALLMULTI\n");
}
- } else
+ } else {
if (!enable) {
/* Disable ALLMULTI */
hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
@@ -1908,6 +1912,7 @@ static void ehea_allmulti(struct net_device *dev, int enable)
netdev_err(dev,
"failed disabling IFF_ALLMULTI\n");
}
+ }
}
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
@@ -1941,11 +1946,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
struct netdev_hw_addr *ha;
int ret;
- if (port->promisc) {
- ehea_promiscuous(dev, 1);
- return;
- }
- ehea_promiscuous(dev, 0);
+ ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
if (dev->flags & IFF_ALLMULTI) {
ehea_allmulti(dev, 1);
@@ -2463,6 +2464,7 @@ static int ehea_down(struct net_device *dev)
return 0;
ehea_drop_multicast_list(dev);
+ ehea_allmulti(dev, 0);
ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
ehea_free_interrupts(dev);
@@ -3261,6 +3263,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
struct ehea_adapter *adapter;
const u64 *adapter_handle;
int ret;
+ int i;
if (!dev || !dev->dev.of_node) {
pr_err("Invalid ibmebus device probed\n");
@@ -3314,17 +3317,9 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
(unsigned long)adapter);
- ret = ibmebus_request_irq(adapter->neq->attr.ist1,
- ehea_interrupt_neq, IRQF_DISABLED,
- "ehea_neq", adapter);
- if (ret) {
- dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
- goto out_kill_eq;
- }
-
ret = ehea_create_device_sysfs(dev);
if (ret)
- goto out_free_irq;
+ goto out_kill_eq;
ret = ehea_setup_ports(adapter);
if (ret) {
@@ -3332,15 +3327,30 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
goto out_rem_dev_sysfs;
}
+ ret = ibmebus_request_irq(adapter->neq->attr.ist1,
+ ehea_interrupt_neq, IRQF_DISABLED,
+ "ehea_neq", adapter);
+ if (ret) {
+ dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
+ goto out_shutdown_ports;
+ }
+
+ /* Handle any events that might be pending. */
+ tasklet_hi_schedule(&adapter->neq_tasklet);
+
ret = 0;
goto out;
+out_shutdown_ports:
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
+ if (adapter->port[i]) {
+ ehea_shutdown_single_port(adapter->port[i]);
+ adapter->port[i] = NULL;
+ }
+
out_rem_dev_sysfs:
ehea_remove_device_sysfs(dev);
-out_free_irq:
- ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
-
out_kill_eq:
ehea_destroy_eq(adapter->neq);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
index 52c456e..8364815 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h
@@ -450,7 +450,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num,
void *cb_addr);
#define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63)
-#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63)
+#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(60, 63)
#define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63)
#define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 4348b6f..37caa88 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3380,7 +3380,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
- struct my_u { u64 a; u64 b; };
+ struct my_u { __le64 a; __le64 b; };
struct my_u *u = (struct my_u *)tx_desc;
const char *type;
@@ -3424,7 +3424,7 @@ rx_ring_summary:
for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
- struct my_u { u64 a; u64 b; };
+ struct my_u { __le64 a; __le64 b; };
struct my_u *u = (struct my_u *)rx_desc;
const char *type;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 64c7644..b461c24 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1310,10 +1310,6 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
oem_reg |= HV_OEM_BITS_LPLU;
-
- /* Set Restart auto-neg to activate the bits */
- if (!hw->phy.ops.check_reset_block(hw))
- oem_reg |= HV_OEM_BITS_RESTART_AN;
} else {
if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
@@ -1324,6 +1320,11 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
oem_reg |= HV_OEM_BITS_LPLU;
}
+ /* Set Restart auto-neg to activate the bits */
+ if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
+ !hw->phy.ops.check_reset_block(hw))
+ oem_reg |= HV_OEM_BITS_RESTART_AN;
+
ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
release:
@@ -3682,7 +3683,11 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
if (hw->mac.type >= e1000_pchlan) {
e1000_oem_bits_config_ich8lan(hw, false);
- e1000_phy_hw_reset_ich8lan(hw);
+
+ /* Reset PHY to activate OEM bits on 82577/8 */
+ if (hw->mac.type == e1000_pchlan)
+ e1000e_phy_hw_reset_generic(hw);
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 19ab215..9520a6a 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3799,7 +3799,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
/* fire an unusual interrupt on the test handler */
ew32(ICS, E1000_ICS_RXSEQ);
e1e_flush();
- msleep(50);
+ msleep(100);
e1000_irq_disable(adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ff796e4..16adeb9 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -106,7 +106,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
/*
* Interrupt Throttle Rate (interrupts/sec)
*
- * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define DEFAULT_ITR 3
@@ -344,53 +344,60 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
- switch (adapter->itr) {
- case 0:
- e_info("%s turned off\n", opt.name);
- break;
- case 1:
- e_info("%s set to dynamic mode\n", opt.name);
- adapter->itr_setting = adapter->itr;
- adapter->itr = 20000;
- break;
- case 3:
- e_info("%s set to dynamic conservative mode\n",
- opt.name);
- adapter->itr_setting = adapter->itr;
- adapter->itr = 20000;
- break;
- case 4:
- e_info("%s set to simplified (2000-8000 ints) "
- "mode\n", opt.name);
- adapter->itr_setting = 4;
- break;
- default:
- /*
- * Save the setting, because the dynamic bits
- * change itr.
- */
- if (e1000_validate_option(&adapter->itr, &opt,
- adapter) &&
- (adapter->itr == 3)) {
- /*
- * In case of invalid user value,
- * default to conservative mode.
- */
- adapter->itr_setting = adapter->itr;
- adapter->itr = 20000;
- } else {
- /*
- * Clear the lower two bits because
- * they are used as control.
- */
- adapter->itr_setting =
- adapter->itr & ~3;
- }
- break;
- }
+
+ /*
+ * Make sure a message is printed for non-special
+ * values. And in case of an invalid option, display
+	 * warning, use default and go through itr/itr_setting
+ * adjustment logic below
+ */
+ if ((adapter->itr > 4) &&
+ e1000_validate_option(&adapter->itr, &opt, adapter))
+ adapter->itr = opt.def;
} else {
- adapter->itr_setting = opt.def;
+ /*
+ * If no option specified, use default value and go
+ * through the logic below to adjust itr/itr_setting
+ */
+ adapter->itr = opt.def;
+
+ /*
+ * Make sure a message is printed for non-special
+ * default values
+ */
+ if (adapter->itr > 40)
+ e_info("%s set to default %d\n", opt.name,
+ adapter->itr);
+ }
+
+ adapter->itr_setting = adapter->itr;
+ switch (adapter->itr) {
+ case 0:
+ e_info("%s turned off\n", opt.name);
+ break;
+ case 1:
+ e_info("%s set to dynamic mode\n", opt.name);
+ adapter->itr = 20000;
+ break;
+ case 3:
+ e_info("%s set to dynamic conservative mode\n",
+ opt.name);
adapter->itr = 20000;
+ break;
+ case 4:
+ e_info("%s set to simplified (2000-8000 ints) mode\n",
+ opt.name);
+ break;
+ default:
+ /*
+ * Save the setting, because the dynamic bits
+ * change itr.
+ *
+ * Clear the lower two bits because
+ * they are used as control.
+ */
+ adapter->itr_setting &= ~3;
+ break;
}
}
{ /* Interrupt Mode */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5ec3159..8683ca4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1111,9 +1111,12 @@ msi_only:
adapter->flags |= IGB_FLAG_HAS_MSI;
out:
/* Notify the stack of the (possibly) reduced queue counts. */
+ rtnl_lock();
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
- return netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_queues);
+ err = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ rtnl_unlock();
+ return err;
}
/**
@@ -2771,8 +2774,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
wr32(E1000_TXDCTL(reg_idx), txdctl);
-
- netdev_tx_reset_queue(txring_txq(ring));
}
/**
@@ -3282,6 +3283,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -6796,18 +6799,7 @@ static int igb_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- if (!rtnl_is_locked()) {
- /*
- * shut up ASSERT_RTNL() warning in
- * netif_set_real_num_tx/rx_queues.
- */
- rtnl_lock();
- err = igb_init_interrupt_scheme(adapter);
- rtnl_unlock();
- } else {
- err = igb_init_interrupt_scheme(adapter);
- }
- if (err) {
+ if (igb_init_interrupt_scheme(adapter)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d61ca2a..8ec74b0 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2731,14 +2731,14 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
netdev->addr_len);
}
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
netdev->dev_addr);
err = -EIO;
goto err_hw_init;
}
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
(unsigned long) adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 74e1921..81b1555 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -574,9 +574,6 @@ extern struct ixgbe_info ixgbe_82599_info;
extern struct ixgbe_info ixgbe_X540_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
-extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
- struct ixgbe_dcb_config *dst_dcb_cfg,
- int tc_max);
#endif
extern char ixgbe_driver_name[];
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 652e4b0..32e5c02 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -44,18 +44,26 @@
#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */
#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */
-int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg,
- struct ixgbe_dcb_config *dcfg, int tc_max)
+static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
{
+ struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg;
+ struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg;
struct tc_configuration *src = NULL;
struct tc_configuration *dst = NULL;
int i, j;
int tx = DCB_TX_CONFIG;
int rx = DCB_RX_CONFIG;
int changes = 0;
+#ifdef IXGBE_FCOE
+ struct dcb_app app = {
+ .selector = DCB_APP_IDTYPE_ETHTYPE,
+ .protocol = ETH_P_FCOE,
+ };
+ u8 up = dcb_getapp(adapter->netdev, &app);
- if (!scfg || !dcfg)
- return changes;
+ if (up && !(up & (1 << adapter->fcoe.up)))
+ changes |= BIT_APP_UPCHG;
+#endif
for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0];
@@ -332,28 +340,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int ret = DCB_NO_HW_CHG;
int i;
-#ifdef IXGBE_FCOE
- struct dcb_app app = {
- .selector = DCB_APP_IDTYPE_ETHTYPE,
- .protocol = ETH_P_FCOE,
- };
- u8 up;
-
- /* In IEEE mode, use the IEEE Ethertype selector value */
- if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) {
- app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
- up = dcb_ieee_getapp_mask(netdev, &app);
- } else {
- up = dcb_getapp(netdev, &app);
- }
-#endif
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ret;
- adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg,
- &adapter->dcb_cfg,
+ adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
MAX_TRAFFIC_CLASS);
if (!adapter->dcb_set_bitmap)
return ret;
@@ -440,8 +432,13 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
* FCoE is using changes. This happens if the APP info
* changes or the up2tc mapping is updated.
*/
- if ((up && !(up & (1 << adapter->fcoe.up))) ||
- (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+ struct dcb_app app = {
+ .selector = DCB_APP_IDTYPE_ETHTYPE,
+ .protocol = ETH_P_FCOE,
+ };
+ u8 up = dcb_getapp(netdev, &app);
+
adapter->fcoe.up = ffs(up) - 1;
ixgbe_dcbnl_devreset(netdev);
ret = DCB_HW_CHG_RST;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 31a2bf7..cfe7d26 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1780,6 +1780,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
}
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
/* re-map buffers to ring, store next to clean values */
ixgbe_alloc_rx_buffers(rx_ring, count);
rx_ring->next_to_clean = rx_ntc;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 77ea4b7..bc07933 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -437,6 +437,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
*/
if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
(fctl & FC_FC_END_SEQ)) {
+ skb_linearize(skb);
crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 027d7a7..ed1b47d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -622,6 +622,16 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
+#ifdef IXGBE_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
+ struct ixgbe_ring_feature *f;
+ f = &adapter->ring_feature[RING_F_FCOE];
+ if ((rxr_idx >= f->mask) &&
+ (rxr_idx < f->mask + f->indices))
+ set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+ }
+
+#endif /* IXGBE_FCOE */
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
ring->queue_index = rxr_idx;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e26b1f..467948e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2671,8 +2671,6 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/* enable queue */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
- netdev_tx_reset_queue(txring_txq(ring));
-
/* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
if (hw->mac.type == ixgbe_mac_82598EB &&
!(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
@@ -3154,14 +3152,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
set_ring_rsc_enabled(rx_ring);
else
clear_ring_rsc_enabled(rx_ring);
-#ifdef IXGBE_FCOE
- if (netdev->features & NETIF_F_FCOE_MTU) {
- struct ixgbe_ring_feature *f;
- f = &adapter->ring_feature[RING_F_FCOE];
- if ((i >= f->mask) && (i < f->mask + f->indices))
- set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state);
- }
-#endif /* IXGBE_FCOE */
}
}
@@ -4175,6 +4165,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
+ netdev_tx_reset_queue(txring_txq(tx_ring));
+
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -4426,8 +4418,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->dcb_set_bitmap = 0x00;
adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
- ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
- MAX_TRAFFIC_CLASS);
+ memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ sizeof(adapter->temp_dcb_cfg));
#endif
@@ -4836,7 +4828,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
+ rtnl_lock();
err = ixgbe_init_interrupt_scheme(adapter);
+ rtnl_unlock();
if (err) {
e_dev_err("Cannot initialize interrupts for device\n");
return err;
@@ -4872,17 +4866,15 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
if (netif_running(netdev)) {
+ rtnl_lock();
ixgbe_down(adapter);
ixgbe_free_irq(adapter);
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
+ rtnl_unlock();
}
ixgbe_clear_interrupt_scheme(adapter);
-#ifdef CONFIG_DCB
- kfree(adapter->ixgbe_ieee_pfc);
- kfree(adapter->ixgbe_ieee_ets);
-#endif
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
@@ -4893,6 +4885,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (wufc) {
ixgbe_set_rx_mode(netdev);
+ /*
+	 * enable the optics for both multi-speed fiber and
+ * 82599 SFP+ fiber as we can WoL.
+ */
+ if (hw->mac.ops.enable_tx_laser &&
+ (hw->phy.multispeed_fiber ||
+ (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
+ hw->mac.type == ixgbe_mac_82599EB)))
+ hw->mac.ops.enable_tx_laser(hw);
+
/* turn on all-multi mode if wake on multicast is enabled */
if (wufc & IXGBE_WUFC_MC) {
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -7220,6 +7222,11 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
ixgbe_release_hw_control(adapter);
+#ifdef CONFIG_DCB
+ kfree(adapter->ixgbe_ieee_pfc);
+ kfree(adapter->ixgbe_ieee_ets);
+
+#endif
iounmap(adapter->hw.hw_addr);
pci_release_selected_regions(pdev, pci_select_bars(pdev,
IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index c9b504e..487a6c8 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2494,8 +2494,13 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb_copy_from_linear_data(re->skb, skb->data, length);
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
+ skb->rxhash = re->skb->rxhash;
+ skb->vlan_tci = re->skb->vlan_tci;
+
pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
length, PCI_DMA_FROMDEVICE);
+ re->skb->vlan_tci = 0;
+ re->skb->rxhash = 0;
re->skb->ip_summed = CHECKSUM_NONE;
skb_put(skb, length);
}
@@ -2580,9 +2585,6 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
struct sk_buff *skb = NULL;
u16 count = (status & GMR_FS_LEN) >> 16;
- if (status & GMR_FS_VLAN)
- count -= VLAN_HLEN; /* Account for vlan tag */
-
netif_printk(sky2, rx_status, KERN_DEBUG, dev,
"rx slot %u status 0x%x len %d\n",
sky2->rx_next, status, length);
@@ -2590,6 +2592,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
prefetch(sky2->rx_ring + sky2->rx_next);
+ if (vlan_tx_tag_present(re->skb))
+ count -= VLAN_HLEN; /* Account for vlan tag */
+
/* This chip has hardware problems that generates bogus status.
* So do only marginal checking and expect higher level protocols
* to handle crap frames.
@@ -2647,11 +2652,8 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
}
static inline void sky2_skb_rx(const struct sky2_port *sky2,
- u32 status, struct sk_buff *skb)
+ struct sk_buff *skb)
{
- if (status & GMR_FS_VLAN)
- __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
-
if (skb->ip_summed == CHECKSUM_NONE)
netif_receive_skb(skb);
else
@@ -2705,6 +2707,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
}
}
+static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
+{
+ struct sk_buff *skb;
+
+ skb = sky2->rx_ring[sky2->rx_next].skb;
+ __vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
+}
+
static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
{
struct sk_buff *skb;
@@ -2763,8 +2773,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
}
skb->protocol = eth_type_trans(skb, dev);
-
- sky2_skb_rx(sky2, status, skb);
+ sky2_skb_rx(sky2, skb);
/* Stop after net poll weight */
if (++work_done >= to_do)
@@ -2772,11 +2781,11 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
break;
case OP_RXVLAN:
- sky2->rx_tag = length;
+ sky2_rx_tag(sky2, length);
break;
case OP_RXCHKSVLAN:
- sky2->rx_tag = length;
+ sky2_rx_tag(sky2, length);
/* fall through */
case OP_RXCHKS:
if (likely(dev->features & NETIF_F_RXCSUM))
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ff6f58b..3c896ce 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2241,7 +2241,6 @@ struct sky2_port {
u16 rx_pending;
u16 rx_data_size;
u16 rx_nfrags;
- u16 rx_tag;
struct {
unsigned long last;
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index c722aa6..5e313e9 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -618,10 +618,8 @@ static void ks8851_irq_work(struct work_struct *work)
netif_dbg(ks, intr, ks->netdev,
"%s: status 0x%04x\n", __func__, status);
- if (status & IRQ_LCI) {
- /* should do something about checking link status */
+ if (status & IRQ_LCI)
handled |= IRQ_LCI;
- }
if (status & IRQ_LDI) {
u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
@@ -684,6 +682,9 @@ static void ks8851_irq_work(struct work_struct *work)
mutex_unlock(&ks->lock);
+ if (status & IRQ_LCI)
+ mii_check_link(&ks->mii);
+
if (status & IRQ_TXI)
netif_wake_queue(ks->netdev);
@@ -889,16 +890,17 @@ static int ks8851_net_stop(struct net_device *dev)
netif_stop_queue(dev);
mutex_lock(&ks->lock);
+ /* turn off the IRQs and ack any outstanding */
+ ks8851_wrreg16(ks, KS_IER, 0x0000);
+ ks8851_wrreg16(ks, KS_ISR, 0xffff);
+ mutex_unlock(&ks->lock);
/* stop any outstanding work */
flush_work(&ks->irq_work);
flush_work(&ks->tx_work);
flush_work(&ks->rxctrl_work);
- /* turn off the IRQs and ack any outstanding */
- ks8851_wrreg16(ks, KS_IER, 0x0000);
- ks8851_wrreg16(ks, KS_ISR, 0xffff);
-
+ mutex_lock(&ks->lock);
/* shutdown RX process */
ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
@@ -907,6 +909,7 @@ static int ks8851_net_stop(struct net_device *dev)
/* set powermode to soft power down to save power */
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
+ mutex_unlock(&ks->lock);
/* ensure any queued tx buffers are dumped */
while (!skb_queue_empty(&ks->txq)) {
@@ -918,7 +921,6 @@ static int ks8851_net_stop(struct net_device *dev)
dev_kfree_skb(txb);
}
- mutex_unlock(&ks->lock);
return 0;
}
@@ -1418,6 +1420,7 @@ static int __devinit ks8851_probe(struct spi_device *spi)
struct net_device *ndev;
struct ks8851_net *ks;
int ret;
+ unsigned cider;
ndev = alloc_etherdev(sizeof(struct ks8851_net));
if (!ndev)
@@ -1484,8 +1487,8 @@ static int __devinit ks8851_probe(struct spi_device *spi)
ks8851_soft_reset(ks, GRR_GSR);
/* simple check for a valid chip being connected to the bus */
-
- if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
+ cider = ks8851_rdreg16(ks, KS_CIDER);
+ if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
dev_err(&spi->dev, "failed to read device ID\n");
ret = -ENODEV;
goto err_id;
@@ -1516,15 +1519,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
}
netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
- CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
- ndev->dev_addr, ndev->irq,
+ CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
ks->rc_ccr & CCR_EEPROM ? "has" : "no");
return 0;
err_netdev:
- free_irq(ndev->irq, ndev);
+ free_irq(ndev->irq, ks);
err_id:
err_irq:
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index b8104d9..5ffde23 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,7 +40,7 @@
#define DRV_NAME "ks8851_mll"
static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
-#define MAX_RECV_FRAMES 32
+#define MAX_RECV_FRAMES 255
#define MAX_BUF_SIZE 2048
#define TX_BUF_SIZE 2000
#define RX_BUF_SIZE 2000
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ef723b1..eaf9ff0 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5675,7 +5675,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
}
- memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
+ memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
interrupt = hw_block_intr(hw);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index abc7907..b3287c0 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -958,6 +958,11 @@ static inline void cp_start_hw (struct cp_private *cp)
cpw8(Cmd, RxOn | TxOn);
}
+static void cp_enable_irq(struct cp_private *cp)
+{
+ cpw16_f(IntrMask, cp_intr_mask);
+}
+
static void cp_init_hw (struct cp_private *cp)
{
struct net_device *dev = cp->dev;
@@ -997,8 +1002,6 @@ static void cp_init_hw (struct cp_private *cp)
cpw16(MultiIntr, 0);
- cpw16_f(IntrMask, cp_intr_mask);
-
cpw8_f(Cfg9346, Cfg9346_Lock);
}
@@ -1130,6 +1133,8 @@ static int cp_open (struct net_device *dev)
if (rc)
goto err_out_hw;
+ cp_enable_irq(cp);
+
netif_carrier_off(dev);
mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
netif_start_queue(dev);
@@ -2031,6 +2036,7 @@ static int cp_resume (struct pci_dev *pdev)
/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
cp_init_rings_index (cp);
cp_init_hw (cp);
+ cp_enable_irq(cp);
netif_start_queue (dev);
spin_lock_irqsave (&cp->lock, flags);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f545093..ce6b44d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -61,8 +61,12 @@
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
-#define TX_BUFFS_AVAIL(tp) \
- (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+ (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@@ -5115,7 +5119,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 opts[2];
int frags;
- if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+ if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
@@ -5169,7 +5173,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
mmiowb();
- if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+ if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue.
*/
@@ -5183,7 +5187,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
* can't.
*/
smp_mb();
- if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+ if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
netif_wake_queue(dev);
}
@@ -5306,7 +5310,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
*/
smp_mb();
if (netif_queue_stopped(dev) &&
- (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+ TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
/*
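
For reference, a stand-alone sketch (not driver code) of the slot accounting behind TX_SLOTS_AVAIL()/TX_FRAGS_READY_FOR(): dirty_tx and cur_tx are free-running unsigned counters, the subtraction stays correct across wraparound, and an skb with nr_frags fragments needs nr_frags + 1 descriptors. NUM_TX_DESC and the names below are illustrative only:

#include <stdio.h>

#define NUM_TX_DESC 64u

static unsigned int tx_slots_avail(unsigned int dirty_tx, unsigned int cur_tx)
{
	return dirty_tx + NUM_TX_DESC - cur_tx;   /* free descriptors */
}

static int tx_frags_ready_for(unsigned int dirty_tx, unsigned int cur_tx,
			      unsigned int nr_frags)
{
	/* an skb with nr_frags fragments needs nr_frags + 1 descriptors */
	return tx_slots_avail(dirty_tx, cur_tx) >= nr_frags + 1;
}

int main(void)
{
	/* counters just below 32-bit wraparound: still 64 free slots */
	unsigned int dirty = 0xffffffc0u, cur = 0xffffffc0u;

	printf("slots=%u ready_for_17_frags=%d\n",
	       tx_slots_avail(dirty, cur),
	       tx_frags_ready_for(dirty, cur, 17));
	return 0;
}
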
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3cbfbff..4a00053 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1349,7 +1349,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
}
/* RSS might be usable on VFs even if it is disabled on the PF */
- efx->rss_spread = (efx->n_rx_channels > 1 ?
+ efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ?
efx->n_rx_channels : efx_vf_size(efx));
return 0;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 4a69710..cd3defb 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1166,10 +1166,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
/* Quickly dumps bad packets */
static void
-smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
+smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
{
- unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
-
if (likely(pktwords >= 4)) {
unsigned int timeout = 500;
unsigned int val;
@@ -1233,7 +1231,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
continue;
}
- skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
+ skb = netdev_alloc_skb(dev, pktwords << 2);
if (unlikely(!skb)) {
SMSC_WARN(pdata, rx_err,
"Unable to allocate skb for rx packet");
@@ -1243,14 +1241,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
break;
}
- skb->data = skb->head;
- skb_reset_tail_pointer(skb);
+ pdata->ops->rx_readfifo(pdata,
+ (unsigned int *)skb->data, pktwords);
/* Align IP on 16B boundary */
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pktlength - 4);
- pdata->ops->rx_readfifo(pdata,
- (unsigned int *)skb->head, pktwords);
skb->protocol = eth_type_trans(skb, dev);
skb_checksum_none_assert(skb);
netif_receive_skb(skb);
@@ -1565,7 +1561,7 @@ static int smsc911x_open(struct net_device *dev)
smsc911x_reg_write(pdata, FIFO_INT, temp);
/* set RX Data offset to 2 bytes for alignment */
- smsc911x_reg_write(pdata, RX_CFG, (2 << 8));
+ smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
/* enable NAPI polling before enabling RX interrupts */
napi_enable(&pdata->napi);
@@ -2382,7 +2378,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
pdata = netdev_priv(dev);
-
dev->irq = irq_res->start;
irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
pdata->ioaddr = ioremap_nocache(res->start, res_size);
@@ -2446,7 +2441,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (retval) {
SMSC_WARN(pdata, probe,
"Unable to claim requested irq: %d", dev->irq);
- goto out_free_irq;
+ goto out_disable_resources;
}
retval = register_netdev(dev);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 558409f..4ba9690 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2339,7 +2339,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(dev);
/* Switch off chip, remember WOL setting */
- gp->asleep_wol = gp->wake_on_lan;
+ gp->asleep_wol = !!gp->wake_on_lan;
gem_do_stop(dev, gp->asleep_wol);
/* Unlock the network stack */
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 174a334..08aff1a 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1511,7 +1511,7 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
static int match_first_device(struct device *dev, void *data)
{
- return 1;
+ return !strncmp(dev_name(dev), "davinci_mdio", 12);
}
/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 2757c7d..e4e4708 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
__davinci_mdio_reset(data);
return -EAGAIN;
}
+
+ reg = __raw_readl(&regs->user[0].access);
+ if ((reg & USERACCESS_GO) == 0)
+ return 0;
+
dev_err(data->dev, "timed out waiting for user access\n");
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 817ad3b..efd3669 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -228,7 +228,7 @@ tlan_get_skb(const struct tlan_list *tag)
unsigned long addr;
addr = tag->buffer[9].address;
- addr |= (tag->buffer[8].address << 16) << 16;
+ addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
return (struct sk_buff *) addr;
}
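
The tlan fix above adds a cast so the upper 32 bits of the stored skb pointer survive the double 16-bit shift. A tiny stand-alone illustration of why the widening matters on a 64-bit build (the values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t low = 0xdeadbeefu, high = 0x00001234u;

	unsigned long wrong = low;
	wrong |= (high << 16) << 16;                   /* 32-bit shift: high half truncated to 0 */

	unsigned long right = low;
	right |= ((unsigned long)high << 16) << 16;    /* widened before shifting, bits kept */

	printf("wrong=0x%lx right=0x%lx\n", wrong, right);
	return 0;
}
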
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index cc83af0..44b8d2b 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -2,9 +2,7 @@
* Definitions for Xilinx Axi Ethernet device driver.
*
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
- * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
- * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
- * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
+ * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*/
#ifndef XILINX_AXIENET_H
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 2fcbeba..9c365e1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -4,9 +4,9 @@
* Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
* Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
- * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
- * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
- * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
* and Spartan6.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index d70b6e7..e90e1f4 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -2,9 +2,9 @@
* MDIO bus driver for the Xilinx Axi Ethernet device
*
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
- * Copyright (c) 2010 Xilinx, Inc. All rights reserved.
- * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
- * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
+ * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
+ * Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*/
#include <linux/of_address.h>
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd29478..2d59138 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -44,6 +44,7 @@ struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
struct delayed_work dwork;
+ struct work_struct work;
};
@@ -51,30 +52,22 @@ static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
-struct set_multicast_work {
- struct work_struct work;
- struct net_device *net;
-};
-
static void do_set_multicast(struct work_struct *w)
{
- struct set_multicast_work *swk =
- container_of(w, struct set_multicast_work, work);
- struct net_device *net = swk->net;
-
- struct net_device_context *ndevctx = netdev_priv(net);
+ struct net_device_context *ndevctx =
+ container_of(w, struct net_device_context, work);
struct netvsc_device *nvdev;
struct rndis_device *rdev;
nvdev = hv_get_drvdata(ndevctx->device_ctx);
- if (nvdev == NULL)
- goto out;
+ if (nvdev == NULL || nvdev->ndev == NULL)
+ return;
rdev = nvdev->extension;
if (rdev == NULL)
- goto out;
+ return;
- if (net->flags & IFF_PROMISC)
+ if (nvdev->ndev->flags & IFF_PROMISC)
rndis_filter_set_packet_filter(rdev,
NDIS_PACKET_TYPE_PROMISCUOUS);
else
@@ -82,21 +75,13 @@ static void do_set_multicast(struct work_struct *w)
NDIS_PACKET_TYPE_BROADCAST |
NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_DIRECTED);
-
-out:
- kfree(w);
}
static void netvsc_set_multicast_list(struct net_device *net)
{
- struct set_multicast_work *swk =
- kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
- if (swk == NULL)
- return;
+ struct net_device_context *net_device_ctx = netdev_priv(net);
- swk->net = net;
- INIT_WORK(&swk->work, do_set_multicast);
- schedule_work(&swk->work);
+ schedule_work(&net_device_ctx->work);
}
static int netvsc_open(struct net_device *net)
@@ -125,6 +110,8 @@ static int netvsc_close(struct net_device *net)
netif_tx_disable(net);
+ /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
+ cancel_work_sync(&net_device_ctx->work);
ret = rndis_filter_close(device_obj);
if (ret != 0)
netdev_err(net, "unable to close device (ret %d).\n", ret);
@@ -335,6 +322,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
nvdev->start_remove = true;
cancel_delayed_work_sync(&ndevctx->dwork);
+ cancel_work_sync(&ndevctx->work);
netif_tx_disable(ndev);
rndis_filter_device_remove(hdev);
@@ -403,6 +391,7 @@ static int netvsc_probe(struct hv_device *dev,
net_device_ctx->device_ctx = dev;
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
+ INIT_WORK(&net_device_ctx->work, do_set_multicast);
net->netdev_ops = &device_ops;
@@ -456,6 +445,7 @@ static int netvsc_remove(struct hv_device *dev)
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
+ cancel_work_sync(&ndev_ctx->work);
/* Stop outbound asap */
netif_tx_disable(net);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f975afd..025367a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -259,7 +259,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
xmit_world:
skb->ip_summed = ip_summed;
- skb_set_dev(skb, vlan->lowerdev);
+ skb->dev = vlan->lowerdev;
return dev_queue_xmit(skb);
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0427c65..cb8fd50 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1,5 +1,6 @@
#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
@@ -759,6 +760,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
struct macvlan_dev *vlan;
int ret;
int vnet_hdr_len = 0;
+ int vlan_offset = 0;
+ int copied;
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
@@ -773,18 +776,48 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
}
+ copied = vnet_hdr_len;
+
+ if (!vlan_tx_tag_present(skb))
+ len = min_t(int, skb->len, len);
+ else {
+ int copy;
+ struct {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ } veth;
+ veth.h_vlan_proto = htons(ETH_P_8021Q);
+ veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+
+ vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ len = min_t(int, skb->len + VLAN_HLEN, len);
+
+ copy = min_t(int, vlan_offset, len);
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
+ len -= copy;
+ copied += copy;
+ if (ret || !len)
+ goto done;
+
+ copy = min_t(int, sizeof(veth), len);
+ ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
+ len -= copy;
+ copied += copy;
+ if (ret || !len)
+ goto done;
+ }
- len = min_t(int, skb->len, len);
-
- ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
+ ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
+ copied += len;
+done:
rcu_read_lock_bh();
vlan = rcu_dereference_bh(q->vlan);
if (vlan)
- macvlan_count_rx(vlan, len, ret == 0, 0);
+ macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
rcu_read_unlock_bh();
- return ret ? ret : (len + vnet_hdr_len);
+ return ret ? ret : copied;
}
static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
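
macvtap_put_user() now splits the copy at the VLAN offset so the tag stripped by hardware can be re-inserted between the MAC addresses and the rest of the frame. A stand-alone sketch of that layout (simplified flat-buffer copy, not the driver's iovec-based one):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htons */

#define VLAN_OFFSET 12   /* offsetof(struct vlan_ethhdr, h_vlan_proto) */

static size_t insert_vlan_tag(const uint8_t *frame, size_t len,
			      uint16_t tci, uint8_t *out)
{
	struct {
		uint16_t proto;
		uint16_t tci;
	} veth = {
		.proto = htons(0x8100),   /* ETH_P_8021Q */
		.tci   = htons(tci),
	};

	memcpy(out, frame, VLAN_OFFSET);                       /* dst + src MAC */
	memcpy(out + VLAN_OFFSET, &veth, sizeof(veth));        /* re-inserted tag */
	memcpy(out + VLAN_OFFSET + sizeof(veth),               /* rest of frame */
	       frame + VLAN_OFFSET, len - VLAN_OFFSET);
	return len + sizeof(veth);
}

int main(void)
{
	uint8_t frame[60] = { 0 }, out[64];
	size_t n = insert_vlan_tag(frame, sizeof(frame), 5, out);

	printf("tagged frame is %zu bytes, TPID %02x%02x\n", n, out[12], out[13]);
	return 0;
}
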
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index f08c85a..5ac46f5 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -40,6 +40,7 @@ MODULE_LICENSE("GPL");
#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
+#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
static int ip175c_config_init(struct phy_device *phydev)
{
@@ -185,6 +186,15 @@ static int ip175c_config_aneg(struct phy_device *phydev)
return 0;
}
+static int ip101a_g_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
static struct phy_driver ip175c_driver = {
.phy_id = 0x02430d80,
.name = "ICPlus IP175C",
@@ -204,7 +214,6 @@ static struct phy_driver ip1001_driver = {
.phy_id_mask = 0x0ffffff0,
.features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &ip1001_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
@@ -220,6 +229,7 @@ static struct phy_driver ip101a_g_driver = {
.features = PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause,
.flags = PHY_HAS_INTERRUPT,
+ .ack_interrupt = ip101a_g_ack_interrupt,
.config_init = &ip101a_g_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 33f8c51..21d7151 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -235,7 +235,7 @@ struct ppp_net {
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
-static int ppp_xmit_process(struct ppp *ppp);
+static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
@@ -969,8 +969,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
put_unaligned_be16(proto, pp);
skb_queue_tail(&ppp->file.xq, skb);
- if (!ppp_xmit_process(ppp))
- netif_stop_queue(dev);
+ ppp_xmit_process(ppp);
return NETDEV_TX_OK;
outf:
@@ -1048,11 +1047,10 @@ static void ppp_setup(struct net_device *dev)
* Called to do any work queued up on the transmit side
* that can now be done.
*/
-static int
+static void
ppp_xmit_process(struct ppp *ppp)
{
struct sk_buff *skb;
- int ret = 0;
ppp_xmit_lock(ppp);
if (!ppp->closing) {
@@ -1062,13 +1060,12 @@ ppp_xmit_process(struct ppp *ppp)
ppp_send_frame(ppp, skb);
/* If there's no work left to do, tell the core net
code that we can accept some more. */
- if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
+ if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
netif_wake_queue(ppp->dev);
- ret = 1;
- }
+ else
+ netif_stop_queue(ppp->dev);
}
ppp_xmit_unlock(ppp);
- return ret;
}
static inline struct sk_buff *
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 5ee032c..42b5151 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -355,7 +355,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
u32 packet_len;
u32 padbytes = 0xffff0000;
- padlen = ((skb->len + 4) % 512) ? 0 : 4;
+ padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
if ((!skb_cloned(skb)) &&
((headroom + tailroom) >= (4 + padlen))) {
@@ -377,7 +377,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
cpu_to_le32s(&packet_len);
skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
- if ((skb->len % 512) == 0) {
+ if (padlen) {
cpu_to_le32s(&padbytes);
memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
skb_put(skb, sizeof(padbytes));
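The padlen change above replaces the hard-coded 512-byte test with the endpoint's actual wMaxPacketSize, so the ZLP-avoidance padding also works on full-speed (64-byte) endpoints. A minimal user-space sketch of the rule, assuming maxpacket is a power of two as USB bulk endpoints are, so the bitmask is equivalent to a modulo:

#include <stdio.h>

/* If the frame length plus the 4-byte length header lands exactly on a
 * wMaxPacketSize boundary, 4 pad bytes are appended so the transfer never
 * ends on a packet boundary. Assumes maxpacket is a power of two (64/512).
 */
static unsigned int asix_padlen(unsigned int skb_len, unsigned int maxpacket)
{
	return ((skb_len + 4) & (maxpacket - 1)) ? 0 : 4;
}

int main(void)
{
	unsigned int lens[] = { 508, 509, 1020, 60 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("len=%4u  pad@512=%u  pad@64=%u\n", lens[i],
		       asix_padlen(lens[i], 512), asix_padlen(lens[i], 64));
	return 0;
}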
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 90a3002..00880ed 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -83,6 +83,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
struct cdc_state *info = (void *) &dev->data;
int status;
int rndis;
+ bool android_rndis_quirk = false;
struct usb_driver *driver = driver_of(intf);
struct usb_cdc_mdlm_desc *desc = NULL;
struct usb_cdc_mdlm_detail_desc *detail = NULL;
@@ -195,6 +196,11 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
info->control,
info->u->bSlaveInterface0,
info->data);
+ /* fall back to hard-wiring for RNDIS */
+ if (rndis) {
+ android_rndis_quirk = true;
+ goto next_desc;
+ }
goto bad_desc;
}
if (info->control != intf) {
@@ -271,11 +277,15 @@ next_desc:
/* Microsoft ActiveSync based and some regular RNDIS devices lack the
* CDC descriptors, so we'll hard-wire the interfaces and not check
* for descriptors.
+ *
+ * Some Android RNDIS devices have a CDC Union descriptor pointing
+ * to non-existing interfaces. Ignore that and attempt the same
+ * hard-wired 0 and 1 interfaces.
*/
- if (rndis && !info->u) {
+ if (rndis && (!info->u || android_rndis_quirk)) {
info->control = usb_ifnum_to_if(dev->udev, 0);
info->data = usb_ifnum_to_if(dev->udev, 1);
- if (!info->control || !info->data) {
+ if (!info->control || !info->data || info->control != intf) {
dev_dbg(&intf->dev,
"rndis: master #0/%p slave #1/%p\n",
info->control,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 552d24b..d316503b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -365,6 +365,27 @@ static const struct driver_info qmi_wwan_force_int4 = {
.data = BIT(4), /* interface whitelist bitmap */
};
+/* Sierra Wireless provides equally useless interface descriptors.
+ * Devices in QMI mode can be switched between two different
+ * configurations:
+ * a) USB interface #8 is QMI/wwan
+ * b) USB interfaces #8, #19 and #20 are QMI/wwan
+ *
+ * Both configurations provide a number of other interfaces (serial++),
+ * some of which have the same endpoint configuration as we expect, so
+ * a whitelist or blacklist is necessary.
+ *
+ * FIXME: The below whitelist should include BIT(20). It does not
+ * because I cannot get it to work...
+ */
+static const struct driver_info qmi_wwan_sierra = {
+ .description = "Sierra Wireless wwan/QMI device",
+ .flags = FLAG_WWAN,
+ .bind = qmi_wwan_bind_gobi,
+ .unbind = qmi_wwan_unbind_shared,
+ .manage_power = qmi_wwan_manage_power,
+ .data = BIT(8) | BIT(19), /* interface whitelist bitmap */
+};
#define HUAWEI_VENDOR_ID 0x12D1
#define QMI_GOBI_DEVICE(vend, prod) \
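The whitelist above is just a bitmap of USB interface numbers carried in driver_info.data; the bind routine presumably checks the probed interface's number against it. A self-contained sketch of that test (BIT() redefined locally, interface numbers taken from the comment above):

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Hypothetical illustration: only interface numbers whose bit is set in
 * the whitelist are treated as QMI/wwan; everything else is left to the
 * serial drivers. */
static int intf_whitelisted(unsigned long whitelist, unsigned int intf_num)
{
	return !!(whitelist & BIT(intf_num));
}

int main(void)
{
	unsigned long sierra_whitelist = BIT(8) | BIT(19);
	unsigned int n;

	for (n = 0; n <= 20; n++)
		if (intf_whitelisted(sierra_whitelist, n))
			printf("interface #%u handled as QMI/wwan\n", n);
	return 0;
}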
@@ -445,6 +466,15 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
+ { /* Sierra Wireless MC77xx in QMI mode */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x1199,
+ .idProduct = 0x68a2,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_sierra,
+ },
{QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
{QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 187d01c..00103a8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -98,7 +98,7 @@ static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
if (unlikely(ret < 0))
netdev_warn(dev->net,
- "Failed to read register index 0x%08x", index);
+ "Failed to read reg index 0x%08x: %d", index, ret);
le32_to_cpus(buf);
*data = *buf;
@@ -128,7 +128,7 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
if (unlikely(ret < 0))
netdev_warn(dev->net,
- "Failed to write register index 0x%08x", index);
+ "Failed to write reg index 0x%08x: %d", index, ret);
kfree(buf);
@@ -171,7 +171,7 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
idx &= dev->mii.reg_num_mask;
addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
- | MII_ACCESS_READ;
+ | MII_ACCESS_READ | MII_ACCESS_BUSY;
ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
check_warn_goto_done(ret, "Error writing MII_ACCESS");
@@ -210,7 +210,7 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
idx &= dev->mii.reg_num_mask;
addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
| ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
- | MII_ACCESS_WRITE;
+ | MII_ACCESS_WRITE | MII_ACCESS_BUSY;
ret = smsc75xx_write_reg(dev, MII_ACCESS, addr);
check_warn_goto_done(ret, "Error writing MII_ACCESS");
@@ -508,9 +508,10 @@ static int smsc75xx_link_reset(struct usbnet *dev)
u16 lcladv, rmtadv;
int ret;
- /* clear interrupt status */
+ /* read and write to clear phy interrupt status */
ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
check_warn_return(ret, "Error reading PHY_INT_SRC");
+ smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC, 0xffff);
ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
check_warn_return(ret, "Error writing INT_STS");
@@ -643,7 +644,7 @@ static int smsc75xx_set_mac_address(struct usbnet *dev)
static int smsc75xx_phy_initialize(struct usbnet *dev)
{
- int bmcr, timeout = 0;
+ int bmcr, ret, timeout = 0;
/* Initialize MII structure */
dev->mii.dev = dev->net;
@@ -651,6 +652,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
dev->mii.mdio_write = smsc75xx_mdio_write;
dev->mii.phy_id_mask = 0x1f;
dev->mii.reg_num_mask = 0x1f;
+ dev->mii.supports_gmii = 1;
dev->mii.phy_id = SMSC75XX_INTERNAL_PHY_ID;
/* reset phy and wait for reset to complete */
@@ -661,7 +663,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
check_warn_return(bmcr, "Error reading MII_BMCR");
timeout++;
- } while ((bmcr & MII_BMCR) && (timeout < 100));
+ } while ((bmcr & BMCR_RESET) && (timeout < 100));
if (timeout >= 100) {
netdev_warn(dev->net, "timeout on PHY Reset");
@@ -671,10 +673,13 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
+ smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
+ ADVERTISE_1000FULL);
- /* read to clear */
- smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
- check_warn_return(bmcr, "Error reading PHY_INT_SRC");
+ /* read and write to clear phy interrupt status */
+ ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
+ check_warn_return(ret, "Error reading PHY_INT_SRC");
+ smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff);
smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
PHY_INT_MASK_DEFAULT);
@@ -946,6 +951,14 @@ static int smsc75xx_reset(struct usbnet *dev)
ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret);
+ /* allow mac to detect speed and duplex from phy */
+ ret = smsc75xx_read_reg(dev, MAC_CR, &buf);
+ check_warn_return(ret, "Failed to read MAC_CR: %d", ret);
+
+ buf |= (MAC_CR_ADD | MAC_CR_ASD);
+ ret = smsc75xx_write_reg(dev, MAC_CR, buf);
+ check_warn_return(ret, "Failed to write MAC_CR: %d", ret);
+
ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
check_warn_return(ret, "Failed to read MAC_TX: %d", ret);
@@ -1051,6 +1064,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
return 0;
}
@@ -1211,7 +1225,7 @@ static const struct driver_info smsc75xx_info = {
.rx_fixup = smsc75xx_rx_fixup,
.tx_fixup = smsc75xx_tx_fixup,
.status = smsc75xx_status,
- .flags = FLAG_ETHER | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
};
static const struct usb_device_id products[] = {
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 5f19f84..94ae669 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1017,6 +1017,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
return 0;
}
@@ -1191,7 +1192,7 @@ static const struct driver_info smsc95xx_info = {
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
- .flags = FLAG_ETHER | FLAG_SEND_ZLP,
+ .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
};
static const struct usb_device_id products[] = {
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index b7b3f5b..2d927fb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -210,6 +210,7 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
} else {
usb_fill_int_urb(dev->interrupt, dev->udev, pipe,
buf, maxp, intr_complete, dev, period);
+ dev->interrupt->transfer_flags |= URB_FREE_BUFFER;
dev_dbg(&intf->dev,
"status ep%din, %d bytes period %d\n",
usb_pipeendpoint(pipe), maxp, period);
@@ -1443,7 +1444,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
status = register_netdev (net);
if (status)
- goto out3;
+ goto out4;
netif_info(dev, probe, dev->net,
"register '%s' at usb-%s-%s, %s, %pM\n",
udev->dev.driver->name,
@@ -1461,6 +1462,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
return 0;
+out4:
+ usb_free_urb(dev->interrupt);
out3:
if (info->unbind)
info->unbind (dev, udev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4de2760..af8acc8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -626,16 +626,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This can happen with OOM and indirect buffers. */
if (unlikely(capacity < 0)) {
if (likely(capacity == -ENOMEM)) {
- if (net_ratelimit()) {
+ if (net_ratelimit())
dev_warn(&dev->dev,
"TX queue failure: out of memory\n");
- } else {
+ } else {
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
"Unexpected TX queue failure: %d\n",
capacity);
- }
}
dev->stats.tx_dropped++;
kfree_skb(skb);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index ebb9f24..1a62318 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2483,6 +2483,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pr_err("Control memory remap failed\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
+ iounmap(card->mem);
kfree(card);
return -ENODEV;
}
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index 8faa129..aec33cc 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -19,6 +19,7 @@
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
+#include <linux/export.h>
#include <ar231x_platform.h>
#include "ath5k.h"
#include "debug.h"
@@ -119,7 +120,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (res == NULL) {
dev_err(&pdev->dev, "no IRQ resource found\n");
ret = -ENXIO;
- goto err_out;
+ goto err_iounmap;
}
irq = res->start;
@@ -128,7 +129,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
ret = -ENOMEM;
- goto err_out;
+ goto err_iounmap;
}
ah = hw->priv;
@@ -185,6 +186,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
err_free_hw:
ieee80211_free_hw(hw);
platform_set_drvdata(pdev, NULL);
+ err_iounmap:
+ iounmap(mem);
err_out:
return ret;
}
@@ -217,6 +220,7 @@ static int ath_ahb_remove(struct platform_device *pdev)
}
ath5k_deinit_ah(ah);
+ iounmap(ah->iobase);
platform_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index d7d8e91..aba0880 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -869,7 +869,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
ar5008_hw_set_channel_regs(ah, chan);
ar5008_hw_init_chain_masks(ah);
ath9k_olc_init(ah);
- ath9k_hw_apply_txpower(ah, chan);
+ ath9k_hw_apply_txpower(ah, chan, false);
/* Write analog registers */
if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 59647a3..3d400e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -54,7 +54,7 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val)
if (val) {
ah->paprd_table_write_done = true;
- ath9k_hw_apply_txpower(ah, chan);
+ ath9k_hw_apply_txpower(ah, chan, false);
}
REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index bc992b2..600aca9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -373,7 +373,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
else
spur_subchannel_sd = 0;
- spur_freq_sd = (freq_offset << 9) / 11;
+ spur_freq_sd = ((freq_offset + 10) << 9) / 11;
} else {
if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
@@ -382,7 +382,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
else
spur_subchannel_sd = 1;
- spur_freq_sd = (freq_offset << 9) / 11;
+ spur_freq_sd = ((freq_offset - 10) << 9) / 11;
}
@@ -694,7 +694,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
ar9003_hw_override_ini(ah);
ar9003_hw_set_channel_regs(ah, chan);
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
- ath9k_hw_apply_txpower(ah, chan);
+ ath9k_hw_apply_txpower(ah, chan, false);
if (AR_SREV_9462(ah)) {
if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index f272236..b34e8b2 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -824,6 +824,8 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
regulatory->max_power_level = ratesArray[i];
}
+ ath9k_hw_update_regulatory_maxpower(ah);
+
if (test)
return;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 6c69e4e..fa84e37 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1454,7 +1454,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
return false;
}
ath9k_hw_set_clockrate(ah);
- ath9k_hw_apply_txpower(ah, chan);
+ ath9k_hw_apply_txpower(ah, chan, false);
ath9k_hw_rfbus_done(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
@@ -2652,7 +2652,8 @@ static int get_antenna_gain(struct ath_hw *ah, struct ath9k_channel *chan)
return ah->eep_ops->get_eeprom(ah, gain_param);
}
-void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan)
+void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
+ bool test)
{
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ieee80211_channel *channel;
@@ -2673,7 +2674,7 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan)
ah->eep_ops->set_txpower(ah, chan,
ath9k_regd_get_ctl(reg, chan),
- ant_reduction, new_pwr, false);
+ ant_reduction, new_pwr, test);
}
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
@@ -2686,7 +2687,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
if (test)
channel->max_power = MAX_RATE_POWER / 2;
- ath9k_hw_apply_txpower(ah, chan);
+ ath9k_hw_apply_txpower(ah, chan, test);
if (test)
channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index aa1680a..e88f182 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -985,7 +985,8 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
/* PHY */
void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
u32 *coef_mantissa, u32 *coef_exponent);
-void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan);
+void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
+ bool test);
/*
* Code Specific to AR5008, AR9001 or AR9002,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 2504ab0..798ea57 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1548,6 +1548,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &hw->conf;
+ bool reset_channel = false;
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
@@ -1556,6 +1557,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
if (sc->ps_idle)
ath_cancel_work(sc);
+ else
+ /*
+ * The chip needs a reset to properly wake up from
+ * full sleep
+ */
+ reset_channel = ah->chip_fullsleep;
}
/*
@@ -1584,7 +1591,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
int old_pos = -1;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 834e6bc..23eaa1b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1820,6 +1820,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_frame_info *fi = get_frame_info(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath_buf *bf;
+ int fragno;
u16 seqno;
bf = ath_tx_get_buffer(sc);
@@ -1831,9 +1832,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
ATH_TXBUF_RESET(bf);
if (tid) {
+ fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seqno = tid->seq_next;
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
- INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+
+ if (fragno)
+ hdr->seq_ctrl |= cpu_to_le16(fragno);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control))
+ INCR(tid->seq_next, IEEE80211_SEQ_MAX);
+
bf->bf_state.seqno = seqno;
}
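The hunk above rebuilds the 802.11 Sequence Control field so all fragments keep their fragment number and share one sequence number, which only advances after the final fragment has been queued. A small standalone illustration of that packing (the constants mirror the IEEE80211_SCTL_FRAG / IEEE80211_SEQ_SEQ_SHIFT / IEEE80211_SEQ_MAX values assumed here):

#include <stdio.h>
#include <stdint.h>

#define SCTL_FRAG	0x000F	/* bits 0-3: fragment number */
#define SEQ_SEQ_SHIFT	4	/* bits 4-15: sequence number */
#define SEQ_MAX		4096

int main(void)
{
	uint16_t seq_next = 100;
	int frag;

	for (frag = 0; frag < 3; frag++) {
		int morefrags = (frag < 2);	/* last fragment clears "more frags" */
		uint16_t seq_ctrl = (uint16_t)(seq_next << SEQ_SEQ_SHIFT) |
				    (frag & SCTL_FRAG);

		printf("frag %d: seq_ctrl=0x%04x\n", frag, seq_ctrl);
		if (!morefrags)
			seq_next = (seq_next + 1) % SEQ_MAX;	/* INCR() equivalent */
	}
	printf("next sequence number: %u\n", seq_next);
	return 0;
}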
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c79e663..e4d6dc2 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4827,8 +4827,14 @@ static int b43_op_start(struct ieee80211_hw *hw)
out_mutex_unlock:
mutex_unlock(&wl->mutex);
- /* reload configuration */
- b43_op_config(hw, ~0);
+ /*
+ * Configuration may have been overwritten during initialization.
+ * Reload the configuration, but only if initialization was
+ * successful. Reloading the configuration after a failed init
+ * may hang the system.
+ */
+ if (!err)
+ b43_op_config(hw, ~0);
return err;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 4688904..758c115 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -108,9 +108,15 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
sdio_release_host(sdfunc);
}
} else if (regaddr == SDIO_CCCR_ABORT) {
+ sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func),
+ GFP_KERNEL);
+ if (!sdfunc)
+ return -ENOMEM;
+ sdfunc->num = 0;
sdio_claim_host(sdfunc);
sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
sdio_release_host(sdfunc);
+ kfree(sdfunc);
} else if (regaddr < 0xF0) {
brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr);
err_ret = -EPERM;
@@ -486,7 +492,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
kfree(bus_if);
return -ENOMEM;
}
- sdiodev->func[0] = func->card->sdio_func[0];
+ sdiodev->func[0] = func;
sdiodev->func[1] = func;
sdiodev->bus_if = bus_if;
bus_if->bus_priv.sdio = sdiodev;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 2bf5dda..e2b34e1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -574,6 +574,8 @@ struct brcmf_sdio {
struct task_struct *dpc_tsk;
struct completion dpc_wait;
+ struct list_head dpc_tsklst;
+ spinlock_t dpc_tl_lock;
struct semaphore sdsem;
@@ -2594,29 +2596,59 @@ clkwait:
return resched;
}
+static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus)
+{
+ struct list_head *new_hd;
+ unsigned long flags;
+
+ if (in_interrupt())
+ new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC);
+ else
+ new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (new_hd == NULL)
+ return;
+
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+ list_add_tail(new_hd, &bus->dpc_tsklst);
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+}
+
static int brcmf_sdbrcm_dpc_thread(void *data)
{
struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
+ struct list_head *cur_hd, *tmp_hd;
+ unsigned long flags;
allow_signal(SIGTERM);
/* Run until signal received */
while (1) {
if (kthread_should_stop())
break;
- if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
- /* Call bus dpc unless it indicated down
- (then clean stop) */
- if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) {
- if (brcmf_sdbrcm_dpc(bus))
- complete(&bus->dpc_wait);
- } else {
+
+ if (list_empty(&bus->dpc_tsklst))
+ if (wait_for_completion_interruptible(&bus->dpc_wait))
+ break;
+
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+ list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) {
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
+
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
/* after stopping the bus, exit thread */
brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
bus->dpc_tsk = NULL;
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
break;
}
- } else
- break;
+
+ if (brcmf_sdbrcm_dpc(bus))
+ brcmf_sdbrcm_adddpctsk(bus);
+
+ spin_lock_irqsave(&bus->dpc_tl_lock, flags);
+ list_del(cur_hd);
+ kfree(cur_hd);
+ }
+ spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
}
return 0;
}
@@ -2669,8 +2701,10 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
/* Schedule DPC if needed to send queued packet(s) */
if (!bus->dpc_sched) {
bus->dpc_sched = true;
- if (bus->dpc_tsk)
+ if (bus->dpc_tsk) {
+ brcmf_sdbrcm_adddpctsk(bus);
complete(&bus->dpc_wait);
+ }
}
return ret;
@@ -3514,8 +3548,10 @@ void brcmf_sdbrcm_isr(void *arg)
brcmf_dbg(ERROR, "isr w/o interrupt configured!\n");
bus->dpc_sched = true;
- if (bus->dpc_tsk)
+ if (bus->dpc_tsk) {
+ brcmf_sdbrcm_adddpctsk(bus);
complete(&bus->dpc_wait);
+ }
}
static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
@@ -3559,8 +3595,10 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->ipend = true;
bus->dpc_sched = true;
- if (bus->dpc_tsk)
+ if (bus->dpc_tsk) {
+ brcmf_sdbrcm_adddpctsk(bus);
complete(&bus->dpc_wait);
+ }
}
}
@@ -3897,6 +3935,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
}
/* Initialize DPC thread */
init_completion(&bus->dpc_wait);
+ INIT_LIST_HEAD(&bus->dpc_tsklst);
+ spin_lock_init(&bus->dpc_tl_lock);
bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread,
bus, "brcmf_dpc");
if (IS_ERR(bus->dpc_tsk)) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 231ddf4..b4d9279 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -847,8 +847,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
*/
if (!(txs->status & TX_STATUS_AMPDU)
&& (txs->status & TX_STATUS_INTERMEDIATE)) {
- wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n",
- __func__);
+ BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n");
return false;
}
@@ -7614,6 +7613,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
{
int len_mpdu;
struct ieee80211_rx_status rx_status;
+ struct ieee80211_hdr *hdr;
memset(&rx_status, 0, sizeof(rx_status));
prep_mac80211_status(wlc, rxh, p, &rx_status);
@@ -7623,6 +7623,13 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
skb_pull(p, D11_PHY_HDR_LEN);
__skb_trim(p, len_mpdu);
+ /* unmute transmit */
+ if (wlc->hw->suspended_fifos) {
+ hdr = (struct ieee80211_hdr *)p->data;
+ if (ieee80211_is_beacon(hdr->frame_control))
+ brcms_b_mute(wlc->hw, false);
+ }
+
memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
}
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 2b02257..1779db3 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2191,6 +2191,7 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
{
int rc = 0;
unsigned long flags;
+ unsigned long now, end;
spin_lock_irqsave(&priv->lock, flags);
if (priv->status & STATUS_HCMD_ACTIVE) {
@@ -2232,10 +2233,20 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
}
spin_unlock_irqrestore(&priv->lock, flags);
+ now = jiffies;
+ end = now + HOST_COMPLETE_TIMEOUT;
+again:
rc = wait_event_interruptible_timeout(priv->wait_command_queue,
!(priv->
status & STATUS_HCMD_ACTIVE),
- HOST_COMPLETE_TIMEOUT);
+ end - now);
+ if (rc < 0) {
+ now = jiffies;
+ if (time_before(now, end))
+ goto again;
+ rc = 0;
+ }
+
if (rc == 0) {
spin_lock_irqsave(&priv->lock, flags);
if (priv->status & STATUS_HCMD_ACTIVE) {
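The hunk above keeps a single deadline across signal interruptions: when the wait returns a negative value before the deadline, it retries with the remaining time instead of restarting the full HOST_COMPLETE_TIMEOUT, and only an expired deadline is treated as a timeout. A rough user-space sketch of the pattern; wait_with_timeout() is a stand-in rather than a real API, and millisecond bookkeeping replaces jiffies:

#include <stdio.h>

static int wait_with_timeout(long remaining_ms)
{
	static int calls;

	(void)remaining_ms;
	/* pretend the first wait is interrupted, the second completes */
	return calls++ == 0 ? -1 : 1;
}

int main(void)
{
	const long timeout_ms = 2000;
	long now_ms = 0, end_ms = now_ms + timeout_ms;
	int rc;

again:
	rc = wait_with_timeout(end_ms - now_ms);
	if (rc < 0) {
		now_ms += 500;		/* time that elapsed before the signal */
		if (now_ms < end_ms)
			goto again;	/* retry with whatever time is left */
		rc = 0;			/* past the deadline: treat as timeout */
	}
	printf("final rc=%d (0 means timed out)\n", rc);
	return 0;
}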
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 5b0d888..8d80e23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -46,8 +46,8 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL1000_UCODE_API_MAX 6
-#define IWL100_UCODE_API_MAX 6
+#define IWL1000_UCODE_API_MAX 5
+#define IWL100_UCODE_API_MAX 5
/* Oldest version we won't warn about */
#define IWL1000_UCODE_API_OK 5
@@ -226,5 +226,5 @@ const struct iwl_cfg iwl100_bg_cfg = {
IWL_DEVICE_100,
};
-MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 5635b9e..ea10862 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -51,10 +51,10 @@
#define IWL135_UCODE_API_MAX 6
/* Oldest version we won't warn about */
-#define IWL2030_UCODE_API_OK 5
-#define IWL2000_UCODE_API_OK 5
-#define IWL105_UCODE_API_OK 5
-#define IWL135_UCODE_API_OK 5
+#define IWL2030_UCODE_API_OK 6
+#define IWL2000_UCODE_API_OK 6
+#define IWL105_UCODE_API_OK 6
+#define IWL135_UCODE_API_OK 6
/* Lowest firmware API version supported */
#define IWL2030_UCODE_API_MIN 5
@@ -328,7 +328,7 @@ const struct iwl_cfg iwl135_bgn_cfg = {
.ht_params = &iwl2000_ht_params,
};
-MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK));
+MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK));
+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a805e97..de0920c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,6 +51,10 @@
#define IWL5000_UCODE_API_MAX 5
#define IWL5150_UCODE_API_MAX 2
+/* Oldest version we won't warn about */
+#define IWL5000_UCODE_API_OK 5
+#define IWL5150_UCODE_API_OK 2
+
/* Lowest firmware API version supported */
#define IWL5000_UCODE_API_MIN 1
#define IWL5150_UCODE_API_MIN 1
@@ -326,6 +330,7 @@ static const struct iwl_ht_params iwl5000_ht_params = {
#define IWL_DEVICE_5000 \
.fw_name_pre = IWL5000_FW_PRE, \
.ucode_api_max = IWL5000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL5000_UCODE_API_OK, \
.ucode_api_min = IWL5000_UCODE_API_MIN, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
@@ -371,6 +376,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
+ .ucode_api_ok = IWL5000_UCODE_API_OK,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.max_inst_size = IWLAGN_RTC_INST_SIZE,
.max_data_size = IWLAGN_RTC_DATA_SIZE,
@@ -386,6 +392,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
#define IWL_DEVICE_5150 \
.fw_name_pre = IWL5150_FW_PRE, \
.ucode_api_max = IWL5150_UCODE_API_MAX, \
+ .ucode_api_ok = IWL5150_UCODE_API_OK, \
.ucode_api_min = IWL5150_UCODE_API_MIN, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
.max_data_size = IWLAGN_RTC_DATA_SIZE, \
@@ -409,5 +416,5 @@ const struct iwl_cfg iwl5150_abg_cfg = {
IWL_DEVICE_5150,
};
-MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 64060cd..f0c9150 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -53,6 +53,8 @@
/* Oldest version we won't warn about */
#define IWL6000_UCODE_API_OK 4
#define IWL6000G2_UCODE_API_OK 5
+#define IWL6050_UCODE_API_OK 5
+#define IWL6000G2B_UCODE_API_OK 6
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
@@ -388,7 +390,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
#define IWL_DEVICE_6030 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
+ .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -557,6 +559,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
-MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index f4b84d1..2247460 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -773,8 +773,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
struct sk_buff *skb;
__le16 fc = hdr->frame_control;
struct iwl_rxon_context *ctx;
- struct page *p;
- int offset;
+ unsigned int hdrlen, fraglen;
/* We only process data packets if the interface is open */
if (unlikely(!priv->is_open)) {
@@ -788,16 +787,24 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
return;
- skb = dev_alloc_skb(128);
+	/* Don't use dev_alloc_skb(), we'll have enough headroom once
+ * ieee80211_hdr pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
+ IWL_ERR(priv, "alloc_skb failed\n");
return;
}
+ hdrlen = min_t(unsigned int, len, skb_tailroom(skb));
+ memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
+ fraglen = len - hdrlen;
- offset = (void *)hdr - rxb_addr(rxb);
- p = rxb_steal_page(rxb);
- skb_add_rx_frag(skb, 0, p, offset, len, len);
+ if (fraglen) {
+ int offset = (void *)hdr + hdrlen - rxb_addr(rxb);
+ skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+ fraglen, rxb->truesize);
+ }
iwl_update_stats(priv, false, fc, len);
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f1226dbf..2a9a16f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -863,7 +863,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
void iwlagn_prepare_restart(struct iwl_priv *priv)
{
- struct iwl_rxon_context *ctx;
bool bt_full_concurrent;
u8 bt_ci_compliance;
u8 bt_load;
@@ -872,8 +871,6 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
lockdep_assert_held(&priv->mutex);
- for_each_context(priv, ctx)
- ctx->vif = NULL;
priv->is_open = 0;
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 9020809..74bce97 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -104,15 +104,29 @@
* (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
* bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
* aligned (address bits 0-7 must be 0).
+ * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
+ * for them are in different places.
*
* Bit fields in each pointer register:
* 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
*/
-#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
-
-/* Find TFD CB base pointer for given queue (range 0-15). */
-#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
+#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
+#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0)
+#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20)
+#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80)
+
+/* Find TFD CB base pointer for given queue */
+static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
+{
+ if (chnl < 16)
+ return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
+ if (chnl < 20)
+ return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
+ WARN_ON_ONCE(chnl >= 32);
+ return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
+}
/**
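The new FH_MEM_CBBC_QUEUE() maps queue numbers onto three separate register windows. A standalone restatement of that mapping, with the base taken as 0 so the printed values are offsets from FH_MEM_LOWER_BOUND:

#include <stdio.h>

/* Queues 0-15 use the original window at +0x9D0, 16-19 the window at
 * +0xBF0, and 20-31 the window at +0xB20, matching the bounds above. */
static unsigned int cbbc_queue_offset(unsigned int chnl)
{
	if (chnl < 16)
		return 0x9D0 + 4 * chnl;
	if (chnl < 20)
		return 0xBF0 + 4 * (chnl - 16);
	return 0xB20 + 4 * (chnl - 20);
}

int main(void)
{
	unsigned int q[] = { 0, 15, 16, 19, 20, 30 };
	unsigned int i;

	for (i = 0; i < 6; i++)
		printf("queue %2u -> FH_MEM_LOWER_BOUND + 0x%03X\n",
		       q[i], cbbc_queue_offset(q[i]));
	return 0;
}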
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index b6805f8..c24a713 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -1244,6 +1244,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
struct iwl_rxon_context *tmp, *ctx = NULL;
int err;
enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
+ bool reset = false;
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
viftype, vif->addr);
@@ -1265,6 +1266,13 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
tmp->interface_modes | tmp->exclusive_interface_modes;
if (tmp->vif) {
+ /* On reset we need to add the same interface again */
+ if (tmp->vif == vif) {
+ reset = true;
+ ctx = tmp;
+ break;
+ }
+
/* check if this busy context is exclusive */
if (tmp->exclusive_interface_modes &
BIT(tmp->vif->type)) {
@@ -1291,7 +1299,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
ctx->vif = vif;
err = iwl_setup_interface(priv, ctx);
- if (!err)
+ if (!err || reset)
goto out;
ctx->vif = NULL;
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 75dc20b..3b106929 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -223,12 +223,33 @@
#define SCD_AIT (SCD_BASE + 0x0c)
#define SCD_TXFACT (SCD_BASE + 0x10)
#define SCD_ACTIVE (SCD_BASE + 0x14)
-#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4)
-#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4)
#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
#define SCD_AGGR_SEL (SCD_BASE + 0x248)
#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
-#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4)
+
+static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
+{
+ if (chnl < 20)
+ return SCD_BASE + 0x18 + chnl * 4;
+ WARN_ON_ONCE(chnl >= 32);
+ return SCD_BASE + 0x284 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
+{
+ if (chnl < 20)
+ return SCD_BASE + 0x68 + chnl * 4;
+ WARN_ON_ONCE(chnl >= 32);
+ return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
+{
+ if (chnl < 20)
+ return SCD_BASE + 0x10c + chnl * 4;
+ WARN_ON_ONCE(chnl >= 32);
+ return SCD_BASE + 0x384 + (chnl - 20) * 4;
+}
/*********************** END TX SCHEDULER *************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 8b1a798..aa7aea1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -374,8 +374,9 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
if (WARN_ON(!rxb))
return;
+ rxcb.truesize = PAGE_SIZE << hw_params(trans).rx_page_order;
dma_unmap_page(trans->dev, rxb->page_dma,
- PAGE_SIZE << hw_params(trans).rx_page_order,
+ rxcb.truesize,
DMA_FROM_DEVICE);
rxcb._page = rxb->page;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 0c81cba..fdf9788 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -260,6 +260,7 @@ static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
struct iwl_rx_cmd_buffer {
struct page *_page;
+ unsigned int truesize;
};
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 3fa1ece..2fa879b 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -103,7 +103,7 @@ static const u32 cipher_suites[] = {
* Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1
* in the firmware spec
*/
-static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
+static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
{
int ret = -ENOTSUPP;
@@ -1411,7 +1411,12 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
goto done;
}
- lbs_set_authtype(priv, sme);
+ ret = lbs_set_authtype(priv, sme);
+ if (ret == -ENOTSUPP) {
+ wiphy_err(wiphy, "unsupported authtype 0x%x\n", sme->auth_type);
+ goto done;
+ }
+
lbs_set_radio(priv, preamble, 1);
/* Do the actual association */
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 445ff21..2f218f9 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -48,15 +48,15 @@
#define PCIE_HOST_INT_STATUS_MASK 0xC3C
#define PCIE_SCRATCH_2_REG 0xC40
#define PCIE_SCRATCH_3_REG 0xC44
-#define PCIE_SCRATCH_4_REG 0xCC0
-#define PCIE_SCRATCH_5_REG 0xCC4
-#define PCIE_SCRATCH_6_REG 0xCC8
-#define PCIE_SCRATCH_7_REG 0xCCC
-#define PCIE_SCRATCH_8_REG 0xCD0
-#define PCIE_SCRATCH_9_REG 0xCD4
-#define PCIE_SCRATCH_10_REG 0xCD8
-#define PCIE_SCRATCH_11_REG 0xCDC
-#define PCIE_SCRATCH_12_REG 0xCE0
+#define PCIE_SCRATCH_4_REG 0xCD0
+#define PCIE_SCRATCH_5_REG 0xCD4
+#define PCIE_SCRATCH_6_REG 0xCD8
+#define PCIE_SCRATCH_7_REG 0xCDC
+#define PCIE_SCRATCH_8_REG 0xCE0
+#define PCIE_SCRATCH_9_REG 0xCE4
+#define PCIE_SCRATCH_10_REG 0xCE8
+#define PCIE_SCRATCH_11_REG 0xCEC
+#define PCIE_SCRATCH_12_REG 0xCF0
#define CPU_INTR_DNLD_RDY BIT(0)
#define CPU_INTR_DOOR_BELL BIT(1)
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 288b035..cc15fdb 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1941,6 +1941,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
rtl_deinit_deferred_work(hw);
rtlpriv->intf_ops->adapter_stop(hw);
}
+ rtlpriv->cfg->ops->disable_interrupt(hw);
/*deinit rfkill */
rtl_deinit_rfkill(hw);
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 41302c7..d1afb8e 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -479,6 +479,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&wl->irq_work);
cancel_work_sync(&wl->tx_work);
cancel_work_sync(&wl->filter_work);
+ cancel_delayed_work_sync(&wl->elp_work);
mutex_lock(&wl->mutex);
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
index f786942..1b851f6 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -315,8 +315,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
if (wl->irq)
free_irq(wl->irq, wl);
- kfree(wl_sdio);
wl1251_free_hw(wl);
+ kfree(wl_sdio);
sdio_claim_host(func);
sdio_release_irq(func);
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8644d53..42cfcd9 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -44,6 +44,7 @@
#include <asm/ropes.h>
#include <asm/mckinley.h> /* for proc_mckinley_root */
#include <asm/runway.h> /* for proc_runway_root */
+#include <asm/page.h> /* for PAGE0 */
#include <asm/pdc.h> /* for PDC_MODEL_* */
#include <asm/pdcpat.h> /* for is_pdc_pat() */
#include <asm/parisc-device.h>
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 083a49f..165274c 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
obj-$(CONFIG_PARISC) += setup-bus.o
obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
obj-$(CONFIG_PPC) += setup-bus.o
+obj-$(CONFIG_FRV) += setup-bus.o
obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
obj-$(CONFIG_X86_VISWS) += setup-irq.o
obj-$(CONFIG_MN10300) += setup-bus.o
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 0f150f2..1929c0c 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -200,7 +200,7 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_D1;
case ACPI_STATE_D2:
return PCI_D2;
- case ACPI_STATE_D3:
+ case ACPI_STATE_D3_HOT:
return PCI_D3hot;
case ACPI_STATE_D3_COLD:
return PCI_D3cold;
@@ -223,7 +223,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
[PCI_D2] = ACPI_STATE_D2,
- [PCI_D3hot] = ACPI_STATE_D3,
+ [PCI_D3hot] = ACPI_STATE_D3_HOT,
[PCI_D3cold] = ACPI_STATE_D3
};
int error = -EINVAL;
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index ec3b8cc..df6296c 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -908,10 +908,6 @@ static int pinctrl_groups_show(struct seq_file *s, void *what)
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
unsigned selector = 0;
- /* No grouping */
- if (!ops)
- return 0;
-
mutex_lock(&pinctrl_mutex);
seq_puts(s, "registered pin groups:\n");
@@ -1225,6 +1221,19 @@ static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
#endif
+static int pinctrl_check_ops(struct pinctrl_dev *pctldev)
+{
+ const struct pinctrl_ops *ops = pctldev->desc->pctlops;
+
+ if (!ops ||
+ !ops->list_groups ||
+ !ops->get_group_name ||
+ !ops->get_group_pins)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* pinctrl_register() - register a pin controller device
* @pctldesc: descriptor for this pin controller
@@ -1256,6 +1265,14 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
INIT_LIST_HEAD(&pctldev->gpio_ranges);
pctldev->dev = dev;
+ /* check core ops for sanity */
+ ret = pinctrl_check_ops(pctldev);
+ if (ret) {
+ pr_err("%s pinctrl ops lacks necessary functions\n",
+ pctldesc->name);
+ goto out_err;
+ }
+
/* If we're implementing pinmuxing, check the ops for sanity */
if (pctldesc->pmxops) {
ret = pinmux_check_ops(pctldev);
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index bc8384c..639db4d 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -50,7 +50,7 @@
*/
#undef START_IN_KERNEL_MODE
-#define DRV_VER "0.5.24"
+#define DRV_VER "0.5.26"
/*
* According to the Atom N270 datasheet,
@@ -83,8 +83,8 @@ static int kernelmode;
#endif
static unsigned int interval = 10;
-static unsigned int fanon = 63000;
-static unsigned int fanoff = 58000;
+static unsigned int fanon = 60000;
+static unsigned int fanoff = 53000;
static unsigned int verbose;
static unsigned int fanstate = ACERHDF_FAN_AUTO;
static char force_bios[16];
@@ -150,6 +150,8 @@ static const struct bios_settings_t bios_tbl[] = {
{"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} },
{"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} },
{"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
+ /* LT1005u */
+ {"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00} },
/* Acer 1410 */
{"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
@@ -161,6 +163,7 @@ static const struct bios_settings_t bios_tbl[] = {
{"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
/* Acer 1810xx */
{"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
@@ -183,29 +186,44 @@ static const struct bios_settings_t bios_tbl[] = {
{"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} },
{"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00} },
/* Acer 531 */
+ {"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00} },
{"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} },
+ {"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00} },
+ /* Acer 751 */
+ {"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00} },
+ /* Acer 1825 */
+ {"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
+ {"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
+ /* Acer TravelMate 7730 */
+ {"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00} },
/* Gateway */
- {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
- {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
- {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} },
- {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
- {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
+ {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} },
+ {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} },
+ {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} },
+ {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
+ {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
+ {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
/* Packard Bell */
- {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
- {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
- {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
- {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
- {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} },
+ {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
+ {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} },
+ {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} },
+ {"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} },
+ {"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00} },
/* pewpew-terminator */
{"", "", "", 0, 0, {0, 0} }
};
@@ -701,15 +719,20 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Feuerer");
MODULE_DESCRIPTION("Aspire One temperature and fan driver");
MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:");
MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:");
+MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1825PTZ:");
MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:");
+MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:");
MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:");
MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:");
+MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:");
module_init(acerhdf_init);
module_exit(acerhdf_exit);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index a05fc9c..e6c08ee 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -212,6 +212,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = {
},
.driver_data = &quirk_dell_vostro_v130,
},
+ { }
};
static struct calling_interface_buffer *buffer;
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index f7ba316e..0ffdb3c 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1565,7 +1565,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
ips->poll_turbo_status = true;
if (!ips_get_i915_syms(ips)) {
- dev_err(&dev->dev, "failed to get i915 symbols, graphics turbo disabled\n");
+ dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n");
ips->gpu_turbo_enabled = false;
} else {
dev_dbg(&dev->dev, "graphics turbo enabled\n");
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 0a3594c..bcbad84 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -78,7 +78,7 @@ static int __devinit mfld_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+ error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND,
DRIVER_NAME, input);
if (error) {
dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e70dd38..046fb1b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1431,7 +1431,10 @@ void devm_regulator_put(struct regulator *regulator)
rc = devres_destroy(regulator->dev, devm_regulator_release,
devm_regulator_match, regulator);
- WARN_ON(rc);
+ if (rc == 0)
+ regulator_put(regulator);
+ else
+ WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regulator_put);
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 9657929..17a58c5 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -684,7 +684,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
}
new_val++;
- } while (desc->min + desc->step + new_val <= desc->max);
+ } while (desc->min + desc->step * new_val <= desc->max);
new_idx = tmp_idx;
new_val = tmp_val;
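The one-character fix above matters because the selector-to-voltage relation is voltage(sel) = min + step * sel; with '+' instead of '*' the raw selector index was compared against a microvolt bound, so the loop could run orders of magnitude past the last valid selector. A tiny illustration with made-up regulator numbers (not taken from the max8997 datasheet):

#include <stdio.h>

int main(void)
{
	/* hypothetical buck range: 650 mV to 2.225 V in 25 mV steps */
	const int min = 650000, step = 25000, max = 2225000;
	int sel = 0;

	while (min + step * sel <= max)	/* the corrected condition */
		sel++;

	printf("first out-of-range selector: %d (last valid voltage %d uV)\n",
	       sel, min + step * (sel - 1));
	return 0;
}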
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cd188ab..c293d0c 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -902,6 +902,7 @@ read_rtc:
}
ds1307->nvram->attr.name = "nvram";
ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR;
+ sysfs_bin_attr_init(ds1307->nvram);
ds1307->nvram->read = ds1307_nvram_read,
ds1307->nvram->write = ds1307_nvram_write,
ds1307->nvram->size = chip->nvram_size;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 42f5f82..029e421 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -360,12 +360,11 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op)
&mpc5200_rtc_ops, THIS_MODULE);
}
- rtc->rtc->uie_unsupported = 1;
-
if (IS_ERR(rtc->rtc)) {
err = PTR_ERR(rtc->rtc);
goto out_free_irq;
}
+ rtc->rtc->uie_unsupported = 1;
return 0;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 120955c..8334dad 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1672,7 +1672,8 @@ static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
{
QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
- if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
+ if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
+ (prcd[76] == 0xF5 || prcd[76] == 0xF6)) {
card->info.blkt.time_total = 250;
card->info.blkt.inter_packet = 5;
card->info.blkt.inter_packet_jumbo = 15;
@@ -4540,7 +4541,8 @@ static void qeth_determine_capabilities(struct qeth_card *card)
goto out_offline;
}
qeth_configure_unitaddr(card, prcd);
- qeth_configure_blkt_default(card, prcd);
+ if (ddev_offline)
+ qeth_configure_blkt_default(card, prcd);
kfree(prcd);
rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 351dc0b..a3a056a 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -218,6 +218,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
+ if (!dma_dev)
+ dma_dev = shost->shost_gendev.parent;
+
shost->dma_dev = dma_dev;
error = device_add(&shost->shost_gendev);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e002cd4..467dc38 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4549,8 +4549,12 @@ static int ipr_ata_slave_alloc(struct scsi_device *sdev)
ENTER;
if (sdev->sdev_target)
sata_port = sdev->sdev_target->hostdata;
- if (sata_port)
+ if (sata_port) {
rc = ata_sas_port_init(sata_port->ap);
+ if (rc == 0)
+ rc = ata_sas_sync_probe(sata_port->ap);
+ }
+
if (rc)
ipr_slave_destroy(sdev);
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index d4bf9c1..45385f5 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -192,22 +192,27 @@ static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
static bool sci_controller_isr(struct isci_host *ihost)
{
- if (sci_controller_completion_queue_has_entries(ihost)) {
+ if (sci_controller_completion_queue_has_entries(ihost))
return true;
- } else {
- /*
- * we have a spurious interrupt it could be that we have already
- * emptied the completion queue from a previous interrupt */
- writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
- /*
- * There is a race in the hardware that could cause us not to be notified
- * of an interrupt completion if we do not take this step. We will mask
- * then unmask the interrupts so if there is another interrupt pending
- * the clearing of the interrupt source we get the next interrupt message. */
+ /* we have a spurious interrupt; it could be that we have already
+ * emptied the completion queue from a previous interrupt
+ * FIXME: really!?
+ */
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+ /* There is a race in the hardware that could cause us not to be
+ * notified of an interrupt completion if we do not take this
+ * step. We will mask then unmask the interrupts so that if another
+ * interrupt is pending after the interrupt source is cleared, we
+ * get the next interrupt message.
+ */
+ spin_lock(&ihost->scic_lock);
+ if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) {
writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
writel(0, &ihost->smu_registers->interrupt_mask);
}
+ spin_unlock(&ihost->scic_lock);
return false;
}
@@ -642,7 +647,6 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
if (completion_status != SCI_SUCCESS)
dev_info(&ihost->pdev->dev,
"controller start timed out, continuing...\n");
- isci_host_change_state(ihost, isci_ready);
clear_bit(IHOST_START_PENDING, &ihost->flags);
wake_up(&ihost->eventq);
}
@@ -657,12 +661,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
sas_drain_work(ha);
- dev_dbg(&ihost->pdev->dev,
- "%s: ihost->status = %d, time = %ld\n",
- __func__, isci_host_get_state(ihost), time);
-
return 1;
-
}
/**
@@ -704,14 +703,15 @@ static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
static void sci_controller_enable_interrupts(struct isci_host *ihost)
{
- BUG_ON(ihost->smu_registers == NULL);
+ set_bit(IHOST_IRQ_ENABLED, &ihost->flags);
writel(0, &ihost->smu_registers->interrupt_mask);
}
void sci_controller_disable_interrupts(struct isci_host *ihost)
{
- BUG_ON(ihost->smu_registers == NULL);
+ clear_bit(IHOST_IRQ_ENABLED, &ihost->flags);
writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+ readl(&ihost->smu_registers->interrupt_mask); /* flush */
}
static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
@@ -822,7 +822,7 @@ static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *
&ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}
-static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
{
if (ihost->sm.current_state_id == SCIC_STARTING) {
/*
@@ -849,6 +849,7 @@ static bool is_phy_starting(struct isci_phy *iphy)
case SCI_PHY_SUB_AWAIT_SATA_POWER:
case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
case SCI_PHY_SUB_FINAL:
return true;
@@ -857,6 +858,39 @@ static bool is_phy_starting(struct isci_phy *iphy)
}
}
+bool is_controller_start_complete(struct isci_host *ihost)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct isci_phy *iphy = &ihost->phys[i];
+ u32 state = iphy->sm.current_state_id;
+
+ /* in apc mode we need to check every phy, in
+ * mpc mode we only need to check phys that have
+ * been configured into a port
+ */
+ if (is_port_config_apc(ihost))
+ /* pass */;
+ else if (!phy_get_non_dummy_port(iphy))
+ continue;
+
+ /* The controller start operation is complete iff:
+ * - all links have been given an opportunity to start
+ * - have no indication of a connected device
+ * - have an indication of a connected device and it has
+ * finished the link training process.
+ */
+ if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+ (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+ (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+ (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask))
+ return false;
+ }
+
+ return true;
+}
+
/**
* sci_controller_start_next_phy - start phy
* @scic: controller
@@ -877,36 +911,7 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
return status;
if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
- bool is_controller_start_complete = true;
- u32 state;
- u8 index;
-
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- iphy = &ihost->phys[index];
- state = iphy->sm.current_state_id;
-
- if (!phy_get_non_dummy_port(iphy))
- continue;
-
- /* The controller start operation is complete iff:
- * - all links have been given an opportunity to start
- * - have no indication of a connected device
- * - have an indication of a connected device and it has
- * finished the link training process.
- */
- if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
- (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
- (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
- (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
- is_controller_start_complete = false;
- break;
- }
- }
-
- /*
- * The controller has successfully finished the start process.
- * Inform the SCI Core user and transition to the READY state. */
- if (is_controller_start_complete == true) {
+ if (is_controller_start_complete(ihost)) {
sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
sci_del_timer(&ihost->phy_timer);
ihost->phy_startup_timer_pending = false;
@@ -987,9 +992,8 @@ static enum sci_status sci_controller_start(struct isci_host *ihost,
u16 index;
if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
- dev_warn(&ihost->pdev->dev,
- "SCIC Controller start operation requested in "
- "invalid state\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -1053,9 +1057,8 @@ void isci_host_scan_start(struct Scsi_Host *shost)
spin_unlock_irq(&ihost->scic_lock);
}
-static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+static void isci_host_stop_complete(struct isci_host *ihost)
{
- isci_host_change_state(ihost, isci_stopped);
sci_controller_disable_interrupts(ihost);
clear_bit(IHOST_STOP_PENDING, &ihost->flags);
wake_up(&ihost->eventq);
@@ -1074,6 +1077,32 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
writel(0, &ihost->smu_registers->interrupt_mask);
}
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task)
+{
+ task->lldd_task = NULL;
+ if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) &&
+ !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) {
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Normal - ireq/task = %p/%p\n",
+ __func__, ireq, task);
+
+ task->task_done(task);
+ } else {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Error - ireq/task = %p/%p\n",
+ __func__, ireq, task);
+
+ sas_task_abort(task);
+ }
+ }
+ if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+ wake_up_all(&ihost->eventq);
+
+ if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+ isci_free_tag(ihost, ireq->io_tag);
+}
/**
* isci_host_completion_routine() - This function is the delayed service
* routine that calls the sci core library's completion handler. It's
@@ -1082,107 +1111,15 @@ static void sci_controller_completion_handler(struct isci_host *ihost)
* @data: This parameter specifies the ISCI host object
*
*/
-static void isci_host_completion_routine(unsigned long data)
+void isci_host_completion_routine(unsigned long data)
{
struct isci_host *ihost = (struct isci_host *)data;
- struct list_head completed_request_list;
- struct list_head errored_request_list;
- struct list_head *current_position;
- struct list_head *next_position;
- struct isci_request *request;
- struct isci_request *next_request;
- struct sas_task *task;
u16 active;
- INIT_LIST_HEAD(&completed_request_list);
- INIT_LIST_HEAD(&errored_request_list);
-
spin_lock_irq(&ihost->scic_lock);
-
sci_controller_completion_handler(ihost);
-
- /* Take the lists of completed I/Os from the host. */
-
- list_splice_init(&ihost->requests_to_complete,
- &completed_request_list);
-
- /* Take the list of errored I/Os from the host. */
- list_splice_init(&ihost->requests_to_errorback,
- &errored_request_list);
-
spin_unlock_irq(&ihost->scic_lock);
- /* Process any completions in the lists. */
- list_for_each_safe(current_position, next_position,
- &completed_request_list) {
-
- request = list_entry(current_position, struct isci_request,
- completed_node);
- task = isci_request_access_task(request);
-
- /* Normal notification (task_done) */
- dev_dbg(&ihost->pdev->dev,
- "%s: Normal - request/task = %p/%p\n",
- __func__,
- request,
- task);
-
- /* Return the task to libsas */
- if (task != NULL) {
-
- task->lldd_task = NULL;
- if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
-
- /* If the task is already in the abort path,
- * the task_done callback cannot be called.
- */
- task->task_done(task);
- }
- }
-
- spin_lock_irq(&ihost->scic_lock);
- isci_free_tag(ihost, request->io_tag);
- spin_unlock_irq(&ihost->scic_lock);
- }
- list_for_each_entry_safe(request, next_request, &errored_request_list,
- completed_node) {
-
- task = isci_request_access_task(request);
-
- /* Use sas_task_abort */
- dev_warn(&ihost->pdev->dev,
- "%s: Error - request/task = %p/%p\n",
- __func__,
- request,
- task);
-
- if (task != NULL) {
-
- /* Put the task into the abort path if it's not there
- * already.
- */
- if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
- sas_task_abort(task);
-
- } else {
- /* This is a case where the request has completed with a
- * status such that it needed further target servicing,
- * but the sas_task reference has already been removed
- * from the request. Since it was errored, it was not
- * being aborted, so there is nothing to do except free
- * it.
- */
-
- spin_lock_irq(&ihost->scic_lock);
- /* Remove the request from the remote device's list
- * of pending requests.
- */
- list_del_init(&request->dev_node);
- isci_free_tag(ihost, request->io_tag);
- spin_unlock_irq(&ihost->scic_lock);
- }
- }
-
/* the coalescence timeout doubles at each encoding step, so
* update it based on the ilog2 value of the outstanding requests
*/
@@ -1213,9 +1150,8 @@ static void isci_host_completion_routine(unsigned long data)
static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{
if (ihost->sm.current_state_id != SCIC_READY) {
- dev_warn(&ihost->pdev->dev,
- "SCIC Controller stop operation requested in "
- "invalid state\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -1241,7 +1177,7 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
switch (ihost->sm.current_state_id) {
case SCIC_RESET:
case SCIC_READY:
- case SCIC_STOPPED:
+ case SCIC_STOPPING:
case SCIC_FAILED:
/*
* The reset operation is not a graceful cleanup, just
@@ -1250,13 +1186,50 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost)
sci_change_state(&ihost->sm, SCIC_RESETTING);
return SCI_SUCCESS;
default:
- dev_warn(&ihost->pdev->dev,
- "SCIC Controller reset operation requested in "
- "invalid state\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
}
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status phy_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ phy_status = sci_phy_stop(&ihost->phys[index]);
+
+ if (phy_status != SCI_SUCCESS &&
+ phy_status != SCI_FAILURE_INVALID_STATE) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to stop "
+ "phy %d because of status %d.\n",
+ __func__,
+ ihost->phys[index].phy_index, phy_status);
+ }
+ }
+
+ return status;
+}
+
+
+/**
+ * isci_host_deinit - shutdown frame reception and dma
+ * @ihost: host to take down
+ *
+ * This is called in either the driver shutdown or the suspend path. In
+ * the shutdown case libsas went through port teardown and normal device
+ * removal (i.e. physical links stayed up to service scsi_device removal
+ * commands). In the suspend case we disable the hardware without
+ * notifying libsas of the link down events since we want libsas to
+ * remember the domain across the suspend/resume cycle
+ */
void isci_host_deinit(struct isci_host *ihost)
{
int i;
@@ -1265,17 +1238,6 @@ void isci_host_deinit(struct isci_host *ihost)
for (i = 0; i < isci_gpio_count(ihost); i++)
writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
- isci_host_change_state(ihost, isci_stopping);
- for (i = 0; i < SCI_MAX_PORTS; i++) {
- struct isci_port *iport = &ihost->ports[i];
- struct isci_remote_device *idev, *d;
-
- list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
- if (test_bit(IDEV_ALLOCATED, &idev->flags))
- isci_remote_device_stop(ihost, idev);
- }
- }
-
set_bit(IHOST_STOP_PENDING, &ihost->flags);
spin_lock_irq(&ihost->scic_lock);
@@ -1284,12 +1246,21 @@ void isci_host_deinit(struct isci_host *ihost)
wait_for_stop(ihost);
+ /* phy stop is after controller stop to allow port and device to
+ * go idle before shutting down the phys, but the expectation is
+ * that i/o has been shut off well before we reach this
+ * function.
+ */
+ sci_controller_stop_phys(ihost);
+
/* disable sgpio: where the above wait should give time for the
* enclosure to sample the gpios going inactive
*/
writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
+ spin_lock_irq(&ihost->scic_lock);
sci_controller_reset(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
/* Cancel any/all outstanding port timers */
for (i = 0; i < ihost->logical_port_entries; i++) {
@@ -1328,29 +1299,6 @@ static void __iomem *smu_base(struct isci_host *isci_host)
return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}
-static void isci_user_parameters_get(struct sci_user_parameters *u)
-{
- int i;
-
- for (i = 0; i < SCI_MAX_PHYS; i++) {
- struct sci_phy_user_params *u_phy = &u->phys[i];
-
- u_phy->max_speed_generation = phy_gen;
-
- /* we are not exporting these for now */
- u_phy->align_insertion_frequency = 0x7f;
- u_phy->in_connection_align_insertion_frequency = 0xff;
- u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
- }
-
- u->stp_inactivity_timeout = stp_inactive_to;
- u->ssp_inactivity_timeout = ssp_inactive_to;
- u->stp_max_occupancy_timeout = stp_max_occ_to;
- u->ssp_max_occupancy_timeout = ssp_max_occ_to;
- u->no_outbound_task_timeout = no_outbound_task_to;
- u->max_concurr_spinup = max_concurr_spinup;
-}
-
static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
@@ -1510,32 +1458,6 @@ static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}
-static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
-{
- u32 index;
- enum sci_status status;
- enum sci_status phy_status;
-
- status = SCI_SUCCESS;
-
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- phy_status = sci_phy_stop(&ihost->phys[index]);
-
- if (phy_status != SCI_SUCCESS &&
- phy_status != SCI_FAILURE_INVALID_STATE) {
- status = SCI_FAILURE;
-
- dev_warn(&ihost->pdev->dev,
- "%s: Controller stop operation failed to stop "
- "phy %d because of status %d.\n",
- __func__,
- ihost->phys[index].phy_index, phy_status);
- }
- }
-
- return status;
-}
-
static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
{
u32 index;
@@ -1595,10 +1517,11 @@ static void sci_controller_stopping_state_enter(struct sci_base_state_machine *s
{
struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
- /* Stop all of the components for this controller */
- sci_controller_stop_phys(ihost);
- sci_controller_stop_ports(ihost);
sci_controller_stop_devices(ihost);
+ sci_controller_stop_ports(ihost);
+
+ if (!sci_controller_has_remote_devices_stopping(ihost))
+ isci_host_stop_complete(ihost);
}
static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
@@ -1624,6 +1547,9 @@ static void sci_controller_reset_hardware(struct isci_host *ihost)
/* The write to the UFQGP clears the UFQPR */
writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+
+ /* clear all interrupts */
+ writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status);
}
static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
@@ -1655,59 +1581,9 @@ static const struct sci_base_state sci_controller_state_table[] = {
.enter_state = sci_controller_stopping_state_enter,
.exit_state = sci_controller_stopping_state_exit,
},
- [SCIC_STOPPED] = {},
[SCIC_FAILED] = {}
};
-static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
-{
- /* these defaults are overridden by the platform / firmware */
- u16 index;
-
- /* Default to APC mode. */
- ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-
- /* Default to APC mode. */
- ihost->oem_parameters.controller.max_concurr_spin_up = 1;
-
- /* Default to no SSC operation. */
- ihost->oem_parameters.controller.do_enable_ssc = false;
-
- /* Default to short cables on all phys. */
- ihost->oem_parameters.controller.cable_selection_mask = 0;
-
- /* Initialize all of the port parameter information to narrow ports. */
- for (index = 0; index < SCI_MAX_PORTS; index++) {
- ihost->oem_parameters.ports[index].phy_mask = 0;
- }
-
- /* Initialize all of the phy parameter information. */
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- /* Default to 3G (i.e. Gen 2). */
- ihost->user_parameters.phys[index].max_speed_generation =
- SCIC_SDS_PARM_GEN2_SPEED;
-
- /* the frequencies cannot be 0 */
- ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
- ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
- ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
-
- /*
- * Previous Vitesse based expanders had a arbitration issue that
- * is worked around by having the upper 32-bits of SAS address
- * with a value greater then the Vitesse company identifier.
- * Hence, usage of 0x5FCFFFFF. */
- ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
- ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
- }
-
- ihost->user_parameters.stp_inactivity_timeout = 5;
- ihost->user_parameters.ssp_inactivity_timeout = 5;
- ihost->user_parameters.stp_max_occupancy_timeout = 5;
- ihost->user_parameters.ssp_max_occupancy_timeout = 20;
- ihost->user_parameters.no_outbound_task_timeout = 2;
-}
-
static void controller_timeout(unsigned long data)
{
struct sci_timer *tmr = (struct sci_timer *)data;
@@ -1724,7 +1600,7 @@ static void controller_timeout(unsigned long data)
sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
else if (sm->current_state_id == SCIC_STOPPING) {
sci_change_state(sm, SCIC_FAILED);
- isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
+ isci_host_stop_complete(ihost);
} else /* / @todo Now what do we want to do in this case? */
dev_err(&ihost->pdev->dev,
"%s: Controller timer fired when controller was not "
@@ -1764,9 +1640,6 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
sci_init_timer(&ihost->timer, controller_timeout);
- /* Initialize the User and OEM parameters to default values. */
- sci_controller_set_default_config_parameters(ihost);
-
return sci_controller_reset(ihost);
}
@@ -1846,27 +1719,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
return 0;
}
-static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
-{
- u32 state = ihost->sm.current_state_id;
- struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
-
- if (state == SCIC_RESET ||
- state == SCIC_INITIALIZING ||
- state == SCIC_INITIALIZED) {
- u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version :
- ISCI_ROM_VER_1_0;
-
- if (sci_oem_parameters_validate(&ihost->oem_parameters,
- oem_version))
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
- return SCI_SUCCESS;
- }
-
- return SCI_FAILURE_INVALID_STATE;
-}
-
static u8 max_spin_up(struct isci_host *ihost)
{
if (ihost->user_parameters.max_concurr_spinup)
@@ -1914,7 +1766,7 @@ static void power_control_timeout(unsigned long data)
ihost->power_control.phys_granted_power++;
sci_phy_consume_power_handler(iphy);
- if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ if (iphy->protocol == SAS_PROTOCOL_SSP) {
u8 j;
for (j = 0; j < SCI_MAX_PHYS; j++) {
@@ -1988,7 +1840,7 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
sizeof(current_phy->frame_rcvd.iaf.sas_addr));
if (current_phy->sm.current_state_id == SCI_PHY_READY &&
- current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+ current_phy->protocol == SAS_PROTOCOL_SSP &&
other == 0) {
sci_phy_consume_power_handler(iphy);
break;
@@ -2279,9 +2131,8 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
unsigned long i, state, val;
if (ihost->sm.current_state_id != SCIC_RESET) {
- dev_warn(&ihost->pdev->dev,
- "SCIC Controller initialize operation requested "
- "in invalid state\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -2384,96 +2235,76 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost)
return result;
}
-static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
- struct sci_user_parameters *sci_parms)
-{
- u32 state = ihost->sm.current_state_id;
-
- if (state == SCIC_RESET ||
- state == SCIC_INITIALIZING ||
- state == SCIC_INITIALIZED) {
- u16 index;
-
- /*
- * Validate the user parameters. If they are not legal, then
- * return a failure.
- */
- for (index = 0; index < SCI_MAX_PHYS; index++) {
- struct sci_phy_user_params *user_phy;
-
- user_phy = &sci_parms->phys[index];
-
- if (!((user_phy->max_speed_generation <=
- SCIC_SDS_PARM_MAX_SPEED) &&
- (user_phy->max_speed_generation >
- SCIC_SDS_PARM_NO_SPEED)))
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
- if (user_phy->in_connection_align_insertion_frequency <
- 3)
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
- if ((user_phy->in_connection_align_insertion_frequency <
- 3) ||
- (user_phy->align_insertion_frequency == 0) ||
- (user_phy->
- notify_enable_spin_up_insertion_frequency ==
- 0))
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
- }
-
- if ((sci_parms->stp_inactivity_timeout == 0) ||
- (sci_parms->ssp_inactivity_timeout == 0) ||
- (sci_parms->stp_max_occupancy_timeout == 0) ||
- (sci_parms->ssp_max_occupancy_timeout == 0) ||
- (sci_parms->no_outbound_task_timeout == 0))
- return SCI_FAILURE_INVALID_PARAMETER_VALUE;
-
- memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
-
- return SCI_SUCCESS;
- }
-
- return SCI_FAILURE_INVALID_STATE;
-}
-
-static int sci_controller_mem_init(struct isci_host *ihost)
+static int sci_controller_dma_alloc(struct isci_host *ihost)
{
struct device *dev = &ihost->pdev->dev;
- dma_addr_t dma;
size_t size;
- int err;
+ int i;
+
+ /* detect re-initialization */
+ if (ihost->completion_queue)
+ return 0;
size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
- ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma,
+ GFP_KERNEL);
if (!ihost->completion_queue)
return -ENOMEM;
- writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
- writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
-
size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
- ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
+ ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma,
GFP_KERNEL);
+
if (!ihost->remote_node_context_table)
return -ENOMEM;
- writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
- writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
-
size = ihost->task_context_entries * sizeof(struct scu_task_context),
- ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma,
+ GFP_KERNEL);
if (!ihost->task_context_table)
return -ENOMEM;
- ihost->task_context_dma = dma;
- writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
- writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
+ size = SCI_UFI_TOTAL_SIZE;
+ ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL);
+ if (!ihost->ufi_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq;
+ dma_addr_t dma;
+
+ ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL);
+ if (!ireq)
+ return -ENOMEM;
+
+ ireq->tc = &ihost->task_context_table[i];
+ ireq->owning_controller = ihost;
+ ireq->request_daddr = dma;
+ ireq->isci_host = ihost;
+ ihost->reqs[i] = ireq;
+ }
+
+ return 0;
+}
+
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+ int err = sci_controller_dma_alloc(ihost);
- err = sci_unsolicited_frame_control_construct(ihost);
if (err)
return err;
+ writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower);
+ writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper);
+
+ writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower);
+ writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper);
+
+ writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower);
+ writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper);
+
+ sci_unsolicited_frame_control_construct(ihost);
+
/*
* Inform the silicon as to the location of the UF headers and
* address table.
@@ -2491,22 +2322,22 @@ static int sci_controller_mem_init(struct isci_host *ihost)
return 0;
}
+/**
+ * isci_host_init - (re-)initialize hardware and internal (private) state
+ * @ihost: host to init
+ *
+ * Any public facing objects (like asd_sas_port, and asd_sas_phys), or
+ * one-time initialization objects like locks and waitqueues, are
+ * not touched (they are initialized in isci_host_alloc)
+ */
int isci_host_init(struct isci_host *ihost)
{
- int err = 0, i;
+ int i, err;
enum sci_status status;
- struct sci_user_parameters sci_user_params;
- struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
-
- spin_lock_init(&ihost->state_lock);
- spin_lock_init(&ihost->scic_lock);
- init_waitqueue_head(&ihost->eventq);
-
- isci_host_change_state(ihost, isci_starting);
-
- status = sci_controller_construct(ihost, scu_base(ihost),
- smu_base(ihost));
+ spin_lock_irq(&ihost->scic_lock);
+ status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost));
+ spin_unlock_irq(&ihost->scic_lock);
if (status != SCI_SUCCESS) {
dev_err(&ihost->pdev->dev,
"%s: sci_controller_construct failed - status = %x\n",
@@ -2515,48 +2346,6 @@ int isci_host_init(struct isci_host *ihost)
return -ENODEV;
}
- ihost->sas_ha.dev = &ihost->pdev->dev;
- ihost->sas_ha.lldd_ha = ihost;
-
- /*
- * grab initial values stored in the controller object for OEM and USER
- * parameters
- */
- isci_user_parameters_get(&sci_user_params);
- status = sci_user_parameters_set(ihost, &sci_user_params);
- if (status != SCI_SUCCESS) {
- dev_warn(&ihost->pdev->dev,
- "%s: sci_user_parameters_set failed\n",
- __func__);
- return -ENODEV;
- }
-
- /* grab any OEM parameters specified in orom */
- if (pci_info->orom) {
- status = isci_parse_oem_parameters(&ihost->oem_parameters,
- pci_info->orom,
- ihost->id);
- if (status != SCI_SUCCESS) {
- dev_warn(&ihost->pdev->dev,
- "parsing firmware oem parameters failed\n");
- return -EINVAL;
- }
- }
-
- status = sci_oem_parameters_set(ihost);
- if (status != SCI_SUCCESS) {
- dev_warn(&ihost->pdev->dev,
- "%s: sci_oem_parameters_set failed\n",
- __func__);
- return -ENODEV;
- }
-
- tasklet_init(&ihost->completion_tasklet,
- isci_host_completion_routine, (unsigned long)ihost);
-
- INIT_LIST_HEAD(&ihost->requests_to_complete);
- INIT_LIST_HEAD(&ihost->requests_to_errorback);
-
spin_lock_irq(&ihost->scic_lock);
status = sci_controller_initialize(ihost);
spin_unlock_irq(&ihost->scic_lock);
@@ -2572,43 +2361,12 @@ int isci_host_init(struct isci_host *ihost)
if (err)
return err;
- for (i = 0; i < SCI_MAX_PORTS; i++)
- isci_port_init(&ihost->ports[i], ihost, i);
-
- for (i = 0; i < SCI_MAX_PHYS; i++)
- isci_phy_init(&ihost->phys[i], ihost, i);
-
/* enable sgpio */
writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
for (i = 0; i < isci_gpio_count(ihost); i++)
writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
- for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
- struct isci_remote_device *idev = &ihost->devices[i];
-
- INIT_LIST_HEAD(&idev->reqs_in_process);
- INIT_LIST_HEAD(&idev->node);
- }
-
- for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
- struct isci_request *ireq;
- dma_addr_t dma;
-
- ireq = dmam_alloc_coherent(&ihost->pdev->dev,
- sizeof(struct isci_request), &dma,
- GFP_KERNEL);
- if (!ireq)
- return -ENOMEM;
-
- ireq->tc = &ihost->task_context_table[i];
- ireq->owning_controller = ihost;
- spin_lock_init(&ireq->state_lock);
- ireq->request_daddr = dma;
- ireq->isci_host = ihost;
- ihost->reqs[i] = ireq;
- }
-
return 0;
}
@@ -2654,7 +2412,7 @@ void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
}
}
-static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
{
u32 index;
@@ -2680,7 +2438,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost,
}
if (!sci_controller_has_remote_devices_stopping(ihost))
- sci_change_state(&ihost->sm, SCIC_STOPPED);
+ isci_host_stop_complete(ihost);
}
void sci_controller_post_request(struct isci_host *ihost, u32 request)
@@ -2842,7 +2600,8 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost,
enum sci_status status;
if (ihost->sm.current_state_id != SCIC_READY) {
- dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -2866,22 +2625,26 @@ enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
enum sci_status status;
if (ihost->sm.current_state_id != SCIC_READY) {
- dev_warn(&ihost->pdev->dev,
- "invalid state to terminate request\n");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
-
status = sci_io_request_terminate(ireq);
- if (status != SCI_SUCCESS)
- return status;
- /*
- * Utilize the original post context command and or in the POST_TC_ABORT
- * request sub-type.
- */
- sci_controller_post_request(ihost,
- ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
- return SCI_SUCCESS;
+ dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n",
+ __func__, status, ireq, ireq->flags);
+
+ if ((status == SCI_SUCCESS) &&
+ !test_bit(IREQ_PENDING_ABORT, &ireq->flags) &&
+ !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) {
+ /* Utilize the original post context command and or in the
+ * POST_TC_ABORT request sub-type.
+ */
+ sci_controller_post_request(
+ ihost, ireq->post_context |
+ SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+ }
+ return status;
}
/**
@@ -2915,7 +2678,8 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
clear_bit(IREQ_ACTIVE, &ireq->flags);
return SCI_SUCCESS;
default:
- dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
@@ -2926,7 +2690,8 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
struct isci_host *ihost = ireq->owning_controller;
if (ihost->sm.current_state_id != SCIC_READY) {
- dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+ dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n",
+ __func__, ihost->sm.current_state_id);
return SCI_FAILURE_INVALID_STATE;
}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index adbad69..9ab58e0 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -55,6 +55,7 @@
#ifndef _SCI_HOST_H_
#define _SCI_HOST_H_
+#include <scsi/sas_ata.h>
#include "remote_device.h"
#include "phy.h"
#include "isci.h"
@@ -108,6 +109,8 @@ struct sci_port_configuration_agent;
typedef void (*port_config_fn)(struct isci_host *,
struct sci_port_configuration_agent *,
struct isci_port *, struct isci_phy *);
+bool is_port_config_apc(struct isci_host *ihost);
+bool is_controller_start_complete(struct isci_host *ihost);
struct sci_port_configuration_agent {
u16 phy_configured_mask;
@@ -157,13 +160,17 @@ struct isci_host {
struct sci_power_control power_control;
u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
struct scu_task_context *task_context_table;
- dma_addr_t task_context_dma;
+ dma_addr_t tc_dma;
union scu_remote_node_context *remote_node_context_table;
+ dma_addr_t rnc_dma;
u32 *completion_queue;
+ dma_addr_t cq_dma;
u32 completion_queue_get;
u32 logical_port_entries;
u32 remote_node_entries;
u32 task_context_entries;
+ void *ufi_buf;
+ dma_addr_t ufi_dma;
struct sci_unsolicited_frame_control uf_control;
/* phy startup */
@@ -190,17 +197,13 @@ struct isci_host {
struct asd_sas_port sas_ports[SCI_MAX_PORTS];
struct sas_ha_struct sas_ha;
- spinlock_t state_lock;
struct pci_dev *pdev;
- enum isci_status status;
#define IHOST_START_PENDING 0
#define IHOST_STOP_PENDING 1
+ #define IHOST_IRQ_ENABLED 2
unsigned long flags;
wait_queue_head_t eventq;
- struct Scsi_Host *shost;
struct tasklet_struct completion_tasklet;
- struct list_head requests_to_complete;
- struct list_head requests_to_errorback;
spinlock_t scic_lock;
struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
@@ -274,13 +277,6 @@ enum sci_controller_states {
SCIC_STOPPING,
/**
- * This state indicates that the controller has successfully been stopped.
- * In this state no new IO operations are permitted.
- * This state is entered from the STOPPING state.
- */
- SCIC_STOPPED,
-
- /**
* This state indicates that the controller could not successfully be
* initialized. In this state no new IO operations are permitted.
* This state is entered from the INITIALIZING state.
@@ -309,32 +305,16 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
return pci_get_drvdata(pdev);
}
+static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
+{
+ return ihost->sas_ha.core.shost;
+}
+
#define for_each_isci_host(id, ihost, pdev) \
for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
ihost = to_pci_info(pdev)->hosts[++id])
-static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
-{
- return isci_host->status;
-}
-
-static inline void isci_host_change_state(struct isci_host *isci_host,
- enum isci_status status)
-{
- unsigned long flags;
-
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_host = %p, state = 0x%x",
- __func__,
- isci_host,
- status);
- spin_lock_irqsave(&isci_host->state_lock, flags);
- isci_host->status = status;
- spin_unlock_irqrestore(&isci_host->state_lock, flags);
-
-}
-
static inline void wait_for_start(struct isci_host *ihost)
{
wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
@@ -360,6 +340,11 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
return dev->port->ha->lldd_ha;
}
+static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev)
+{
+ return dev_to_ihost(idev->domain_dev);
+}
+
/* we always use protocol engine group zero */
#define ISCI_PEG 0
@@ -378,8 +363,7 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
{
struct domain_device *dev = idev->domain_dev;
- if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
- !idev->is_direct_attached)
+ if (dev_is_sata(dev) && dev->parent)
return SCU_STP_REMOTE_NODE_COUNT;
return SCU_SSP_REMOTE_NODE_COUNT;
}
@@ -475,36 +459,17 @@ void sci_controller_free_remote_node_context(
struct isci_remote_device *idev,
u16 node_id);
-struct isci_request *sci_request_by_tag(struct isci_host *ihost,
- u16 io_tag);
-
-void sci_controller_power_control_queue_insert(
- struct isci_host *ihost,
- struct isci_phy *iphy);
-
-void sci_controller_power_control_queue_remove(
- struct isci_host *ihost,
- struct isci_phy *iphy);
-
-void sci_controller_link_up(
- struct isci_host *ihost,
- struct isci_port *iport,
- struct isci_phy *iphy);
-
-void sci_controller_link_down(
- struct isci_host *ihost,
- struct isci_port *iport,
- struct isci_phy *iphy);
-
-void sci_controller_remote_device_stopped(
- struct isci_host *ihost,
- struct isci_remote_device *idev);
-
-void sci_controller_copy_task_context(
- struct isci_host *ihost,
- struct isci_request *ireq);
-
-void sci_controller_register_setup(struct isci_host *ihost);
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag);
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+ struct isci_phy *iphy);
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+ struct isci_phy *iphy);
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+ struct isci_remote_device *idev);
enum sci_status sci_controller_continue_io(struct isci_request *ireq);
int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
@@ -512,29 +477,14 @@ void isci_host_scan_start(struct Scsi_Host *);
u16 isci_alloc_tag(struct isci_host *ihost);
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
void isci_tci_free(struct isci_host *ihost, u16 tci);
+void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task);
int isci_host_init(struct isci_host *);
-
-void isci_host_init_controller_names(
- struct isci_host *isci_host,
- unsigned int controller_idx);
-
-void isci_host_deinit(
- struct isci_host *);
-
-void isci_host_port_link_up(
- struct isci_host *,
- struct isci_port *,
- struct isci_phy *);
-int isci_host_dev_found(struct domain_device *);
-
-void isci_host_remote_device_start_complete(
- struct isci_host *,
- struct isci_remote_device *,
- enum sci_status);
-
-void sci_controller_disable_interrupts(
- struct isci_host *ihost);
+void isci_host_completion_routine(unsigned long data);
+void isci_host_deinit(struct isci_host *);
+void sci_controller_disable_interrupts(struct isci_host *ihost);
+bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost);
+void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status);
enum sci_status sci_controller_start_io(
struct isci_host *ihost,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 5137db5..47e28b5 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -271,13 +271,12 @@ static void isci_unregister(struct isci_host *isci_host)
if (!isci_host)
return;
- shost = isci_host->shost;
-
sas_unregister_ha(&isci_host->sas_ha);
- sas_remove_host(isci_host->shost);
- scsi_remove_host(isci_host->shost);
- scsi_host_put(isci_host->shost);
+ shost = to_shost(isci_host);
+ sas_remove_host(shost);
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
}
static int __devinit isci_pci_init(struct pci_dev *pdev)
@@ -397,38 +396,199 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
return err;
}
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct sci_phy_user_params *u_phy = &u->phys[i];
+
+ u_phy->max_speed_generation = phy_gen;
+
+ /* we are not exporting these for now */
+ u_phy->align_insertion_frequency = 0x7f;
+ u_phy->in_connection_align_insertion_frequency = 0xff;
+ u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+ }
+
+ u->stp_inactivity_timeout = stp_inactive_to;
+ u->ssp_inactivity_timeout = ssp_inactive_to;
+ u->stp_max_occupancy_timeout = stp_max_occ_to;
+ u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+ u->no_outbound_task_timeout = no_outbound_task_to;
+ u->max_concurr_spinup = max_concurr_spinup;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+ struct sci_user_parameters *sci_parms)
+{
+ u16 index;
+
+ /*
+ * Validate the user parameters. If they are not legal, then
+ * return a failure.
+ */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct sci_phy_user_params *u;
+
+ u = &sci_parms->phys[index];
+
+ if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) &&
+ (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED)))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ if (u->in_connection_align_insertion_frequency < 3)
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ if ((u->in_connection_align_insertion_frequency < 3) ||
+ (u->align_insertion_frequency == 0) ||
+ (u->notify_enable_spin_up_insertion_frequency == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ if ((sci_parms->stp_inactivity_timeout == 0) ||
+ (sci_parms->ssp_inactivity_timeout == 0) ||
+ (sci_parms->stp_max_occupancy_timeout == 0) ||
+ (sci_parms->ssp_max_occupancy_timeout == 0) ||
+ (sci_parms->no_outbound_task_timeout == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+ return SCI_SUCCESS;
+}
+
+static void sci_oem_defaults(struct isci_host *ihost)
+{
+ /* these defaults are overridden by the platform / firmware */
+ struct sci_user_parameters *user = &ihost->user_parameters;
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ int i;
+
+ /* Default to APC mode. */
+ oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+ /* Default to APC mode. */
+ oem->controller.max_concurr_spin_up = 1;
+
+ /* Default to no SSC operation. */
+ oem->controller.do_enable_ssc = false;
+
+ /* Default to short cables on all phys. */
+ oem->controller.cable_selection_mask = 0;
+
+ /* Initialize all of the port parameter information to narrow ports. */
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ oem->ports[i].phy_mask = 0;
+
+ /* Initialize all of the phy parameter information. */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ /* Default to 3G (i.e. Gen 2). */
+ user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED;
+
+ /* the frequencies cannot be 0 */
+ user->phys[i].align_insertion_frequency = 0x7f;
+ user->phys[i].in_connection_align_insertion_frequency = 0xff;
+ user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33;
+
+ /* Previous Vitesse-based expanders had an arbitration issue that
+ * is worked around by having the upper 32-bits of SAS address
+ * with a value greater than the Vitesse company identifier.
+ * Hence, usage of 0x5FCFFFFF.
+ */
+ oem->phys[i].sas_address.low = 0x1 + ihost->id;
+ oem->phys[i].sas_address.high = 0x5FCFFFFF;
+ }
+
+ user->stp_inactivity_timeout = 5;
+ user->ssp_inactivity_timeout = 5;
+ user->stp_max_occupancy_timeout = 5;
+ user->ssp_max_occupancy_timeout = 20;
+ user->no_outbound_task_timeout = 2;
+}
+
static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
{
- struct isci_host *isci_host;
+ struct isci_orom *orom = to_pci_info(pdev)->orom;
+ struct sci_user_parameters sci_user_params;
+ u8 oem_version = ISCI_ROM_VER_1_0;
+ struct isci_host *ihost;
struct Scsi_Host *shost;
- int err;
+ int err, i;
- isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
- if (!isci_host)
+ ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL);
+ if (!ihost)
return NULL;
- isci_host->pdev = pdev;
- isci_host->id = id;
+ ihost->pdev = pdev;
+ ihost->id = id;
+ spin_lock_init(&ihost->scic_lock);
+ init_waitqueue_head(&ihost->eventq);
+ ihost->sas_ha.dev = &ihost->pdev->dev;
+ ihost->sas_ha.lldd_ha = ihost;
+ tasklet_init(&ihost->completion_tasklet,
+ isci_host_completion_routine, (unsigned long)ihost);
+
+ /* validate module parameters */
+ /* TODO: kill struct sci_user_parameters and reference directly */
+ sci_oem_defaults(ihost);
+ isci_user_parameters_get(&sci_user_params);
+ if (sci_user_parameters_set(ihost, &sci_user_params)) {
+ dev_warn(&pdev->dev,
+ "%s: sci_user_parameters_set failed\n", __func__);
+ return NULL;
+ }
+
+ /* sanity check platform (or 'firmware') oem parameters */
+ if (orom) {
+ if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) {
+ dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n");
+ return NULL;
+ }
+ ihost->oem_parameters = orom->ctrl[id];
+ oem_version = orom->hdr.version;
+ }
+
+ /* validate oem parameters (platform, firmware, or built-in defaults) */
+ if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) {
+ dev_warn(&pdev->dev, "oem parameter validation failed\n");
+ return NULL;
+ }
+
+ for (i = 0; i < SCI_MAX_PORTS; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ INIT_LIST_HEAD(&iport->remote_dev_list);
+ iport->isci_host = ihost;
+ }
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ isci_phy_init(&ihost->phys[i], ihost, i);
+
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ struct isci_remote_device *idev = &ihost->devices[i];
+
+ INIT_LIST_HEAD(&idev->node);
+ }
shost = scsi_host_alloc(&isci_sht, sizeof(void *));
if (!shost)
return NULL;
- isci_host->shost = shost;
dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
"{%s, %s, %s, %s}\n",
- (is_cable_select_overridden() ? "* " : ""), isci_host->id,
- lookup_cable_names(decode_cable_selection(isci_host, 3)),
- lookup_cable_names(decode_cable_selection(isci_host, 2)),
- lookup_cable_names(decode_cable_selection(isci_host, 1)),
- lookup_cable_names(decode_cable_selection(isci_host, 0)));
+ (is_cable_select_overridden() ? "* " : ""), ihost->id,
+ lookup_cable_names(decode_cable_selection(ihost, 3)),
+ lookup_cable_names(decode_cable_selection(ihost, 2)),
+ lookup_cable_names(decode_cable_selection(ihost, 1)),
+ lookup_cable_names(decode_cable_selection(ihost, 0)));
- err = isci_host_init(isci_host);
+ err = isci_host_init(ihost);
if (err)
goto err_shost;
- SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
- isci_host->sas_ha.core.shost = shost;
+ SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
+ ihost->sas_ha.core.shost = shost;
shost->transportt = isci_transport_template;
shost->max_id = ~0;
@@ -439,11 +599,11 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
if (err)
goto err_shost;
- err = isci_register_sas_ha(isci_host);
+ err = isci_register_sas_ha(ihost);
if (err)
goto err_shost_remove;
- return isci_host;
+ return ihost;
err_shost_remove:
scsi_remove_host(shost);
@@ -476,7 +636,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
if (!orom)
orom = isci_request_oprom(pdev);
- for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+ for (i = 0; orom && i < num_controllers(pdev); i++) {
if (sci_oem_parameters_validate(&orom->ctrl[i],
orom->hdr.version)) {
dev_warn(&pdev->dev,
@@ -525,11 +685,11 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
pci_info->hosts[i] = h;
/* turn on DIF support */
- scsi_host_set_prot(h->shost,
+ scsi_host_set_prot(to_shost(h),
SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIF_TYPE3_PROTECTION);
- scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
+ scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);
@@ -537,7 +697,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
goto err_host_alloc;
for_each_isci_host(i, isci_host, pdev)
- scsi_scan_host(isci_host->shost);
+ scsi_scan_host(to_shost(isci_host));
return 0;
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index fab3586..18f43d4 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -580,7 +580,7 @@ static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+ iphy->protocol = SAS_PROTOCOL_SSP;
}
static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
@@ -591,7 +591,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
*/
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+ iphy->protocol = SAS_PROTOCOL_SATA;
}
/**
@@ -668,6 +668,19 @@ static const char *phy_event_name(u32 event_code)
phy_to_host(iphy)->id, iphy->phy_index, \
phy_state_name(state), phy_event_name(code), code)
+
+void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+{
+ u32 val;
+
+ /* Extend timeout */
+ val = readl(&iphy->link_layer_registers->transmit_comsas_signal);
+ val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK);
+ val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout);
+
+ writel(val, &iphy->link_layer_registers->transmit_comsas_signal);
+}
+
enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
{
enum sci_phy_states state = iphy->sm.current_state_id;
@@ -683,6 +696,13 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sata_link_training(iphy);
iphy->is_in_link_training = true;
break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ /* Extend timeout value */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
default:
phy_event_dbg(iphy, state, event_code);
return SCI_FAILURE;
@@ -717,9 +737,19 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sata_link_training(iphy);
break;
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ /* Extend the timeout value */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
default:
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE;
@@ -740,7 +770,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
sci_phy_start_sata_link_training(iphy);
break;
case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ /* Extend the timeout value */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED);
+
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
case SCU_EVENT_LINK_FAILURE:
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
case SCU_EVENT_HARD_RESET_RECEIVED:
/* Start the oob/sn state machine over again */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
@@ -753,6 +790,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
case SCI_PHY_SUB_AWAIT_SAS_POWER:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -764,6 +804,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
case SCI_PHY_SUB_AWAIT_SATA_POWER:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -788,6 +831,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -797,7 +843,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
*/
break;
case SCU_EVENT_SATA_PHY_DETECTED:
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+ iphy->protocol = SAS_PROTOCOL_SATA;
/* We have received the SATA PHY notification change state */
sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
@@ -836,6 +882,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
break;
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -859,6 +908,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
break;
case SCU_EVENT_LINK_FAILURE:
+ /* Change the timeout value to default */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
@@ -871,16 +923,26 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
case SCI_PHY_READY:
switch (scu_get_event_code(event_code)) {
case SCU_EVENT_LINK_FAILURE:
+ /* Set default timeout */
+ scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT);
+
/* Link failure change state back to the starting state */
sci_change_state(&iphy->sm, SCI_PHY_STARTING);
break;
case SCU_EVENT_BROADCAST_CHANGE:
+ case SCU_EVENT_BROADCAST_SES:
+ case SCU_EVENT_BROADCAST_RESERVED0:
+ case SCU_EVENT_BROADCAST_RESERVED1:
+ case SCU_EVENT_BROADCAST_EXPANDER:
+ case SCU_EVENT_BROADCAST_AEN:
/* Broadcast change received. Notify the port. */
if (phy_get_non_dummy_port(iphy) != NULL)
sci_port_broadcast_change_received(iphy->owning_port, iphy);
else
iphy->bcn_received_while_port_unassigned = true;
break;
+ case SCU_EVENT_BROADCAST_RESERVED3:
+ case SCU_EVENT_BROADCAST_RESERVED4:
default:
phy_event_warn(iphy, state, event_code);
return SCI_FAILURE_INVALID_STATE;
@@ -1215,7 +1277,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
scu_link_layer_start_oob(iphy);
/* We don't know what kind of phy we are going to be just yet */
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->protocol = SAS_PROTOCOL_NONE;
iphy->bcn_received_while_port_unassigned = false;
if (iphy->sm.previous_state_id == SCI_PHY_READY)
@@ -1250,7 +1312,7 @@ static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
*/
sci_port_deactivate_phy(iphy->owning_port, iphy, false);
- if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ if (iphy->protocol == SAS_PROTOCOL_SSP) {
scu_link_layer_tx_hard_reset(iphy);
} else {
/* The SCU does not need to have a discrete reset state so
@@ -1316,7 +1378,7 @@ void sci_phy_construct(struct isci_phy *iphy,
iphy->owning_port = iport;
iphy->phy_index = phy_index;
iphy->bcn_received_while_port_unassigned = false;
- iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->protocol = SAS_PROTOCOL_NONE;
iphy->link_layer_registers = NULL;
iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
@@ -1380,12 +1442,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
switch (func) {
case PHY_FUNC_DISABLE:
spin_lock_irqsave(&ihost->scic_lock, flags);
+ scu_link_layer_start_oob(iphy);
sci_phy_stop(iphy);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
break;
case PHY_FUNC_LINK_RESET:
spin_lock_irqsave(&ihost->scic_lock, flags);
+ scu_link_layer_start_oob(iphy);
sci_phy_stop(iphy);
sci_phy_start(iphy);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 0e45833..45fecfa 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -76,13 +76,6 @@
*/
#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
-enum sci_phy_protocol {
- SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
- SCIC_SDS_PHY_PROTOCOL_SAS,
- SCIC_SDS_PHY_PROTOCOL_SATA,
- SCIC_SDS_MAX_PHY_PROTOCOLS
-};
-
/**
* isci_phy - hba local phy infrastructure
* @sm:
@@ -95,7 +88,7 @@ struct isci_phy {
struct sci_base_state_machine sm;
struct isci_port *owning_port;
enum sas_linkrate max_negotiated_speed;
- enum sci_phy_protocol protocol;
+ enum sas_protocol protocol;
u8 phy_index;
bool bcn_received_while_port_unassigned;
bool is_in_link_training;
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 5fada73..2fb85bf 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -184,7 +184,7 @@ static void isci_port_link_up(struct isci_host *isci_host,
sci_port_get_properties(iport, &properties);
- if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
+ if (iphy->protocol == SAS_PROTOCOL_SATA) {
u64 attached_sas_address;
iphy->sas_phy.oob_mode = SATA_OOB_MODE;
@@ -204,7 +204,7 @@ static void isci_port_link_up(struct isci_host *isci_host,
memcpy(&iphy->sas_phy.attached_sas_addr,
&attached_sas_address, sizeof(attached_sas_address));
- } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ } else if (iphy->protocol == SAS_PROTOCOL_SSP) {
iphy->sas_phy.oob_mode = SAS_OOB_MODE;
iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
@@ -251,10 +251,10 @@ static void isci_port_link_down(struct isci_host *isci_host,
if (isci_phy->sas_phy.port &&
isci_phy->sas_phy.port->num_phys == 1) {
/* change the state for all devices on this port. The
- * next task sent to this device will be returned as
- * SAS_TASK_UNDELIVERED, and the scsi mid layer will
- * remove the target
- */
+ * next task sent to this device will be returned as
+ * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+ * remove the target
+ */
list_for_each_entry(isci_device,
&isci_port->remote_dev_list,
node) {
@@ -517,7 +517,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a
*/
iphy = sci_port_get_a_connected_phy(iport);
if (iphy) {
- if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
+ if (iphy->protocol != SAS_PROTOCOL_SATA) {
sci_phy_get_attached_sas_address(iphy, sas);
} else {
sci_phy_get_sas_address(iphy, sas);
@@ -624,7 +624,7 @@ static void sci_port_activate_phy(struct isci_port *iport,
{
struct isci_host *ihost = iport->owning_controller;
- if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
+ if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME))
sci_phy_resume(iphy);
iport->active_phy_mask |= 1 << iphy->phy_index;
@@ -751,12 +751,10 @@ static bool sci_port_is_wide(struct isci_port *iport)
* wide ports and direct attached phys. Since there are no wide ported SATA
* devices this could become an invalid port configuration.
*/
-bool sci_port_link_detected(
- struct isci_port *iport,
- struct isci_phy *iphy)
+bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy)
{
if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
- (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+ (iphy->protocol == SAS_PROTOCOL_SATA)) {
if (sci_port_is_wide(iport)) {
sci_port_invalid_link_up(iport, iphy);
return false;
@@ -1201,6 +1199,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
enum sci_status status;
enum sci_port_states state;
+ sci_port_bcn_enable(iport);
+
state = iport->sm.current_state_id;
switch (state) {
case SCI_PORT_STOPPED: {
@@ -1548,6 +1548,29 @@ static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
}
+void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout)
+{
+ int phy_index;
+ u32 phy_mask = iport->active_phy_mask;
+
+ if (timeout)
+ ++iport->hang_detect_users;
+ else if (iport->hang_detect_users > 1)
+ --iport->hang_detect_users;
+ else
+ iport->hang_detect_users = 0;
+
+ if (timeout || (iport->hang_detect_users == 0)) {
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+ if ((phy_mask >> phy_index) & 1) {
+ writel(timeout,
+ &iport->phy_table[phy_index]
+ ->link_layer_registers
+ ->link_layer_hang_detection_timeout);
+ }
+ }
+ }
+}
/* --------------------------------------------------------------------------- */
static const struct sci_base_state sci_port_state_table[] = {
@@ -1596,6 +1619,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
iport->started_request_count = 0;
iport->assigned_device_count = 0;
+ iport->hang_detect_users = 0;
iport->reserved_rni = SCU_DUMMY_INDEX;
iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
@@ -1608,13 +1632,6 @@ void sci_port_construct(struct isci_port *iport, u8 index,
iport->phy_table[index] = NULL;
}
-void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
-{
- INIT_LIST_HEAD(&iport->remote_dev_list);
- INIT_LIST_HEAD(&iport->domain_dev_list);
- iport->isci_host = ihost;
-}
-
void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
{
struct isci_host *ihost = iport->owning_controller;
@@ -1671,17 +1688,6 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
__func__, iport, status);
}
-
- /* If the hard reset for the port has failed, consider this
- * the same as link failures on all phys in the port.
- */
- if (ret != TMF_RESP_FUNC_COMPLETE) {
-
- dev_err(&ihost->pdev->dev,
- "%s: iport = %p; hard reset failed "
- "(0x%x) - driving explicit link fail for all phys\n",
- __func__, iport, iport->hard_reset_status);
- }
return ret;
}
@@ -1740,7 +1746,7 @@ void isci_port_formed(struct asd_sas_phy *phy)
struct isci_host *ihost = phy->ha->lldd_ha;
struct isci_phy *iphy = to_iphy(phy);
struct asd_sas_port *port = phy->port;
- struct isci_port *iport;
+ struct isci_port *iport = NULL;
unsigned long flags;
int i;
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 6b56240..861e8f7 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -97,7 +97,6 @@ enum isci_status {
struct isci_port {
struct isci_host *isci_host;
struct list_head remote_dev_list;
- struct list_head domain_dev_list;
#define IPORT_RESET_PENDING 0
unsigned long state;
enum sci_status hard_reset_status;
@@ -112,6 +111,7 @@ struct isci_port {
u16 reserved_tag;
u32 started_request_count;
u32 assigned_device_count;
+ u32 hang_detect_users;
u32 not_ready_reason;
struct isci_phy *phy_table[SCI_MAX_PHYS];
struct isci_host *owning_controller;
@@ -270,14 +270,13 @@ void sci_port_get_attached_sas_address(
struct isci_port *iport,
struct sci_sas_address *sas_address);
+void sci_port_set_hang_detection_timeout(
+ struct isci_port *isci_port,
+ u32 timeout);
+
void isci_port_formed(struct asd_sas_phy *);
void isci_port_deformed(struct asd_sas_phy *);
-void isci_port_init(
- struct isci_port *port,
- struct isci_host *host,
- int index);
-
int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
struct isci_phy *iphy);
int isci_ata_check_ready(struct domain_device *dev);
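Note: the hang-detection plumbing introduced above (sci_port_set_hang_detection_timeout() in port.c, reference-counted through the new hang_detect_users field) is driven per device by isci_dev_set_hang_detection_timeout(), which appears later in this patch. A minimal usage sketch follows; the caller name and the timeout value of 1 are illustrative assumptions, not part of this patch.

	/* Illustrative only: bracket an STP/SATA recovery step with link-layer
	 * hang detection. A non-zero timeout bumps hang_detect_users on the
	 * owning port; zero drops the count and clears the register once the
	 * last user is gone.
	 */
	static void example_recover_stp_device(struct isci_remote_device *idev)
	{
		/* Assumed caller context: idev is a SATA/STP device. */
		isci_dev_set_hang_detection_timeout(idev, 0x00000001);

		/* ... issue the device reset and wait for it to settle ... */

		isci_dev_set_hang_detection_timeout(idev, 0);
	}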
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 6d1e954..cd962da 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000)
enum SCIC_SDS_APC_ACTIVITY {
SCIC_SDS_APC_SKIP_PHY,
@@ -472,13 +472,9 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
* down event or a link up event where we can not yet tell to which a phy
* belongs.
*/
-static void sci_apc_agent_start_timer(
- struct sci_port_configuration_agent *port_agent,
- u32 timeout)
+static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent,
+ u32 timeout)
{
- if (port_agent->timer_pending)
- sci_del_timer(&port_agent->timer);
-
port_agent->timer_pending = true;
sci_mod_timer(&port_agent->timer, timeout);
}
@@ -697,6 +693,9 @@ static void apc_agent_timeout(unsigned long data)
&ihost->phys[index], false);
}
+ if (is_controller_start_complete(ihost))
+ sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+
done:
spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
@@ -732,6 +731,11 @@ void sci_port_configuration_agent_construct(
}
}
+bool is_port_config_apc(struct isci_host *ihost)
+{
+ return ihost->port_agent.link_up_handler == sci_apc_agent_link_up;
+}
+
enum sci_status sci_port_configuration_agent_initialize(
struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent)
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index 9b8117b..4d95654 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -112,18 +112,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
return rom;
}
-enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
- struct isci_orom *orom, int scu_index)
-{
- /* check for valid inputs */
- if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
- scu_index > orom->hdr.num_elements || !oem)
- return -EINVAL;
-
- *oem = orom->ctrl[scu_index];
- return 0;
-}
-
struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
{
struct isci_orom *orom = NULL, *data;
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index bb0e9d4..e08b578 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -156,8 +156,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
struct isci_orom;
struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
-enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
- struct isci_orom *orom, int scu_index);
struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 7eb0ccd..97f3ceb 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -1239,6 +1239,14 @@ struct scu_transport_layer_registers {
#define SCU_SAS_LLCTL_GEN_BIT(name) \
SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT (0xF0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED (0x1FF)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT (0)
+#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK (0x3FF)
+
+#define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value)
+
/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
#define SCU_PSZGCR_OFFSET 0x00E4
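Note: the TXCOMSAS NEGTIME definitions added above back the scu_link_layer_set_txcomsas_timeout() calls inserted into the phy state machine earlier in this patch. The helper itself lives in phy.c and is not shown in this hunk; the sketch below only illustrates the intended read-modify-write built from these macros. The register field name (transmit_comsas_signal) is an assumption for illustration, and callers would pass either the DEFAULT or EXTENDED value defined above.

	/* Sketch (assumed register field name): replace only the COMSAS
	 * negotiation-time field, preserving the remaining register bits.
	 */
	static void example_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
	{
		u32 val = readl(&iphy->link_layer_registers->transmit_comsas_signal);

		val &= ~SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK;
		val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout);

		writel(val, &iphy->link_layer_registers->transmit_comsas_signal);
	}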
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 8f501b0..c3aa6c5 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -72,46 +72,11 @@ const char *dev_state_name(enum sci_remote_device_states state)
}
#undef C
-/**
- * isci_remote_device_not_ready() - This function is called by the ihost when
- * the remote device is not ready. We mark the isci device as ready (not
- * "ready_for_io") and signal the waiting proccess.
- * @isci_host: This parameter specifies the isci host object.
- * @isci_device: This parameter specifies the remote device
- *
- * sci_lock is held on entrance to this function.
- */
-static void isci_remote_device_not_ready(struct isci_host *ihost,
- struct isci_remote_device *idev, u32 reason)
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+ enum sci_remote_node_suspension_reasons reason)
{
- struct isci_request *ireq;
-
- dev_dbg(&ihost->pdev->dev,
- "%s: isci_device = %p\n", __func__, idev);
-
- switch (reason) {
- case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
- set_bit(IDEV_GONE, &idev->flags);
- break;
- case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
- set_bit(IDEV_IO_NCQERROR, &idev->flags);
-
- /* Kill all outstanding requests for the device. */
- list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
-
- dev_dbg(&ihost->pdev->dev,
- "%s: isci_device = %p request = %p\n",
- __func__, idev, ireq);
-
- sci_controller_terminate_request(ihost,
- idev,
- ireq);
- }
- /* Fall through into the default case... */
- default:
- clear_bit(IDEV_IO_READY, &idev->flags);
- break;
- }
+ return sci_remote_node_context_suspend(&idev->rnc, reason,
+ SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
}
/**
@@ -133,18 +98,29 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote
wake_up(&ihost->eventq);
}
-/* called once the remote node context is ready to be freed.
- * The remote device can now report that its stop operation is complete. none
- */
-static void rnc_destruct_done(void *_dev)
+static enum sci_status sci_remote_device_terminate_req(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ int check_abort,
+ struct isci_request *ireq)
{
- struct isci_remote_device *idev = _dev;
+ if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+ (ireq->target_device != idev) ||
+ (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags)))
+ return SCI_SUCCESS;
- BUG_ON(idev->started_request_count != 0);
- sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p; flags=%lx; req=%p; req target=%p\n",
+ __func__, idev, idev->flags, ireq, ireq->target_device);
+
+ set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+
+ return sci_controller_terminate_request(ihost, idev, ireq);
}
-static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
+static enum sci_status sci_remote_device_terminate_reqs_checkabort(
+ struct isci_remote_device *idev,
+ int chk)
{
struct isci_host *ihost = idev->owning_port->owning_controller;
enum sci_status status = SCI_SUCCESS;
@@ -154,18 +130,210 @@ static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_d
struct isci_request *ireq = ihost->reqs[i];
enum sci_status s;
- if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
- ireq->target_device != idev)
- continue;
-
- s = sci_controller_terminate_request(ihost, idev, ireq);
+ s = sci_remote_device_terminate_req(ihost, idev, chk, ireq);
if (s != SCI_SUCCESS)
status = s;
}
+ return status;
+}
+
+static bool isci_compare_suspendcount(
+ struct isci_remote_device *idev,
+ u32 localcount)
+{
+ smp_rmb();
+
+ /* Check for a change in the suspend count, or the RNC
+ * being destroyed.
+ */
+ return (localcount != idev->rnc.suspend_count)
+ || sci_remote_node_context_is_being_destroyed(&idev->rnc);
+}
+
+static bool isci_check_reqterm(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq,
+ u32 localcount)
+{
+ unsigned long flags;
+ bool res;
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ res = isci_compare_suspendcount(idev, localcount)
+ && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return res;
+}
+
+static bool isci_check_devempty(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u32 localcount)
+{
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ res = isci_compare_suspendcount(idev, localcount)
+ && idev->started_request_count == 0;
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return res;
+}
+
+enum sci_status isci_remote_device_terminate_requests(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status = SCI_SUCCESS;
+ unsigned long flags;
+ u32 rnc_suspend_count;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (isci_get_device(idev) == NULL) {
+ dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n",
+ __func__, idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ status = SCI_FAILURE;
+ } else {
+ /* If already suspended, don't wait for another suspension. */
+ smp_rmb();
+ rnc_suspend_count
+ = sci_remote_node_context_is_suspended(&idev->rnc)
+ ? 0 : idev->rnc.suspend_count;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p, ireq=%p; started_request_count=%d, "
+ "rnc_suspend_count=%d, rnc.suspend_count=%d"
+ "about to wait\n",
+ __func__, idev, ireq, idev->started_request_count,
+ rnc_suspend_count, idev->rnc.suspend_count);
+
+ #define MAX_SUSPEND_MSECS 10000
+ if (ireq) {
+ /* Terminate a specific TC. */
+ set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+ sci_remote_device_terminate_req(ihost, idev, 0, ireq);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (!wait_event_timeout(ihost->eventq,
+ isci_check_reqterm(ihost, idev, ireq,
+ rnc_suspend_count),
+ msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+ dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n",
+ __func__, ihost->id);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: ******* Timeout waiting for "
+ "suspend; idev=%p, current state %s; "
+ "started_request_count=%d, flags=%lx\n\t"
+ "rnc_suspend_count=%d, rnc.suspend_count=%d "
+ "RNC: current state %s, current "
+ "suspend_type %x dest state %d;\n"
+ "ireq=%p, ireq->flags = %lx\n",
+ __func__, idev,
+ dev_state_name(idev->sm.current_state_id),
+ idev->started_request_count, idev->flags,
+ rnc_suspend_count, idev->rnc.suspend_count,
+ rnc_state_name(idev->rnc.sm.current_state_id),
+ idev->rnc.suspend_type,
+ idev->rnc.destination_state,
+ ireq, ireq->flags);
+ }
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags);
+ if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+ isci_free_tag(ihost, ireq->io_tag);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ } else {
+ /* Terminate all TCs. */
+ sci_remote_device_terminate_requests(idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (!wait_event_timeout(ihost->eventq,
+ isci_check_devempty(ihost, idev,
+ rnc_suspend_count),
+ msecs_to_jiffies(MAX_SUSPEND_MSECS))) {
+
+ dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n",
+ __func__, ihost->id);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: ******* Timeout waiting for "
+ "suspend; idev=%p, current state %s; "
+ "started_request_count=%d, flags=%lx\n\t"
+ "rnc_suspend_count=%d, "
+ "RNC: current state %s, "
+ "rnc.suspend_count=%d, current "
+ "suspend_type %x dest state %d\n",
+ __func__, idev,
+ dev_state_name(idev->sm.current_state_id),
+ idev->started_request_count, idev->flags,
+ rnc_suspend_count,
+ rnc_state_name(idev->rnc.sm.current_state_id),
+ idev->rnc.suspend_count,
+ idev->rnc.suspend_type,
+ idev->rnc.destination_state);
+ }
+ }
+ dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n",
+ __func__, idev);
+ isci_put_device(idev);
+ }
return status;
}
+/**
+* isci_remote_device_not_ready() - This function is called by the ihost when
+* the remote device is not ready. We mark the isci device as ready (not
+* "ready_for_io") and signal the waiting proccess.
+* @isci_host: This parameter specifies the isci host object.
+* @isci_device: This parameter specifies the remote device
+*
+* sci_lock is held on entrance to this function.
+*/
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u32 reason)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p; reason = %d\n", __func__, idev, reason);
+
+ switch (reason) {
+ case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+ set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+ /* Suspend the remote device so the I/O can be terminated. */
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
+
+ /* Kill all outstanding requests for the device. */
+ sci_remote_device_terminate_requests(idev);
+
+ /* Fall through into the default case... */
+ default:
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ break;
+ }
+}
+
+/* called once the remote node context is ready to be freed.
+ * The remote device can now report that its stop operation is complete. none
+ */
+static void rnc_destruct_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ BUG_ON(idev->started_request_count != 0);
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+enum sci_status sci_remote_device_terminate_requests(
+ struct isci_remote_device *idev)
+{
+ return sci_remote_device_terminate_reqs_checkabort(idev, 0);
+}
+
enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
u32 timeout)
{
@@ -201,13 +369,16 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
case SCI_SMP_DEV_IDLE:
case SCI_SMP_DEV_CMD:
sci_change_state(sm, SCI_DEV_STOPPING);
- if (idev->started_request_count == 0) {
+ if (idev->started_request_count == 0)
sci_remote_node_context_destruct(&idev->rnc,
- rnc_destruct_done, idev);
- return SCI_SUCCESS;
- } else
- return sci_remote_device_terminate_requests(idev);
- break;
+ rnc_destruct_done,
+ idev);
+ else {
+ sci_remote_device_suspend(
+ idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+ sci_remote_device_terminate_requests(idev);
+ }
+ return SCI_SUCCESS;
case SCI_DEV_STOPPING:
/* All requests should have been terminated, but if there is an
* attempt to stop a device already in the stopping state, then
@@ -265,22 +436,6 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev
return SCI_SUCCESS;
}
-enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
- u32 suspend_type)
-{
- struct sci_base_state_machine *sm = &idev->sm;
- enum sci_remote_device_states state = sm->current_state_id;
-
- if (state != SCI_STP_DEV_CMD) {
- dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
- __func__, dev_state_name(state));
- return SCI_FAILURE_INVALID_STATE;
- }
-
- return sci_remote_node_context_suspend(&idev->rnc,
- suspend_type, NULL, NULL);
-}
-
enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
u32 frame_index)
{
@@ -412,9 +567,9 @@ static void atapi_remote_device_resume_done(void *_dev)
enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
u32 event_code)
{
+ enum sci_status status;
struct sci_base_state_machine *sm = &idev->sm;
enum sci_remote_device_states state = sm->current_state_id;
- enum sci_status status;
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_OPS_MISC:
@@ -427,9 +582,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
status = SCI_SUCCESS;
/* Suspend the associated RNC */
- sci_remote_node_context_suspend(&idev->rnc,
- SCI_SOFTWARE_SUSPENSION,
- NULL, NULL);
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL);
dev_dbg(scirdev_to_dev(idev),
"%s: device: %p event code: %x: %s\n",
@@ -455,6 +608,10 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
if (status != SCI_SUCCESS)
return status;
+ /* Decode device-specific states that may require an RNC resume during
+ * normal operation. When the abort path is active, these resumes are
+ * managed when the abort path exits.
+ */
if (state == SCI_STP_DEV_ATAPI_ERROR) {
/* For ATAPI error state resume the RNC right away. */
if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
@@ -743,10 +900,6 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
if (status != SCI_SUCCESS)
return status;
- status = sci_remote_node_context_start_task(&idev->rnc, ireq);
- if (status != SCI_SUCCESS)
- goto out;
-
status = sci_request_start(ireq);
if (status != SCI_SUCCESS)
goto out;
@@ -765,11 +918,11 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
* the correct action when the remote node context is suspended
* and later resumed.
*/
- sci_remote_node_context_suspend(&idev->rnc,
- SCI_SOFTWARE_SUSPENSION, NULL, NULL);
- sci_remote_node_context_resume(&idev->rnc,
- sci_remote_device_continue_request,
- idev);
+ sci_remote_device_suspend(idev,
+ SCI_SW_SUSPEND_LINKHANG_DETECT);
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+ sci_remote_device_continue_request, idev);
out:
sci_remote_device_start_request(idev, ireq, status);
@@ -783,7 +936,9 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
if (status != SCI_SUCCESS)
return status;
- status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+ /* Resume the RNC as needed: */
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq,
+ NULL, NULL);
if (status != SCI_SUCCESS)
break;
@@ -892,7 +1047,7 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_
* here should go through isci_remote_device_nuke_requests.
* If we hit this condition, we will need a way to complete
* io requests in process */
- BUG_ON(!list_empty(&idev->reqs_in_process));
+ BUG_ON(idev->started_request_count > 0);
sci_remote_device_destruct(idev);
list_del_init(&idev->node);
@@ -954,14 +1109,21 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm
static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
- sci_remote_node_context_suspend(
- &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
}
static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
{
struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
}
@@ -1113,33 +1275,20 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
{
enum sci_status status;
struct sci_port_properties properties;
- struct domain_device *dev = idev->domain_dev;
sci_remote_device_construct(iport, idev);
- /*
- * This information is request to determine how many remote node context
- * entries will be needed to store the remote node.
- */
- idev->is_direct_attached = true;
-
sci_port_get_properties(iport, &properties);
/* Get accurate port width from port's phy mask for a DA device. */
idev->device_port_width = hweight32(properties.phy_mask);
status = sci_controller_allocate_remote_node_context(iport->owning_controller,
- idev,
- &idev->rnc.remote_node_index);
+ idev,
+ &idev->rnc.remote_node_index);
if (status != SCI_SUCCESS)
return status;
- if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
- (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
- /* pass */;
- else
- return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
-
idev->connection_rate = sci_port_get_max_allowed_speed(iport);
return SCI_SUCCESS;
@@ -1171,19 +1320,13 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
- (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
- /* pass */;
- else
- return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
-
- /*
- * For SAS-2 the physical link rate is actually a logical link
+ /* For SAS-2 the physical link rate is actually a logical link
* rate that incorporates multiplexing. The SCU doesn't
* incorporate multiplexing and for the purposes of the
* connection the logical link rate is that same as the
* physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
- * one another, so this code works for both situations. */
+ * one another, so this code works for both situations.
+ */
idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
dev->linkrate);
@@ -1193,6 +1336,105 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
return SCI_SUCCESS;
}
+enum sci_status sci_remote_device_resume(
+ struct isci_remote_device *idev,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum sci_status status;
+
+ status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p);
+ if (status != SCI_SUCCESS)
+ dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n",
+ __func__, status);
+ return status;
+}
+
+static void isci_remote_device_resume_from_abort_complete(void *cbparam)
+{
+ struct isci_remote_device *idev = cbparam;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ scics_sds_remote_node_context_callback abort_resume_cb =
+ idev->abort_resume_cb;
+
+ dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n",
+ __func__, abort_resume_cb);
+
+ if (abort_resume_cb != NULL) {
+ idev->abort_resume_cb = NULL;
+ abort_resume_cb(idev->abort_resume_cbparam);
+ }
+ clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+ wake_up(&ihost->eventq);
+}
+
+static bool isci_remote_device_test_resume_done(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ unsigned long flags;
+ bool done;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags)
+ || test_bit(IDEV_STOP_PENDING, &idev->flags)
+ || sci_remote_node_context_is_being_destroyed(&idev->rnc);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return done;
+}
+
+void isci_remote_device_wait_for_resume_from_abort(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n",
+ __func__, idev);
+
+ #define MAX_RESUME_MSECS 10000
+ if (!wait_event_timeout(ihost->eventq,
+ isci_remote_device_test_resume_done(ihost, idev),
+ msecs_to_jiffies(MAX_RESUME_MSECS))) {
+
+ dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for "
+ "resume: %p\n", __func__, idev);
+ }
+ clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+ dev_dbg(&ihost->pdev->dev, "%s: resume wait done: %p\n",
+ __func__, idev);
+}
+
+enum sci_status isci_remote_device_resume_from_abort(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ unsigned long flags;
+ enum sci_status status = SCI_SUCCESS;
+ int destroyed;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ /* Preserve any current resume callbacks, for instance from other
+ * resumptions.
+ */
+ idev->abort_resume_cb = idev->rnc.user_callback;
+ idev->abort_resume_cbparam = idev->rnc.user_cookie;
+ set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+ clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+ destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc);
+ if (!destroyed)
+ status = sci_remote_device_resume(
+ idev, isci_remote_device_resume_from_abort_complete,
+ idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ if (!destroyed && (status == SCI_SUCCESS))
+ isci_remote_device_wait_for_resume_from_abort(ihost, idev);
+ else
+ clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags);
+
+ return status;
+}
+
/**
* sci_remote_device_start() - This method will start the supplied remote
* device. This method enables normal IO requests to flow through to the
@@ -1207,7 +1449,7 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
* the device when there have been no phys added to it.
*/
static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
- u32 timeout)
+ u32 timeout)
{
struct sci_base_state_machine *sm = &idev->sm;
enum sci_remote_device_states state = sm->current_state_id;
@@ -1219,9 +1461,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
return SCI_FAILURE_INVALID_STATE;
}
- status = sci_remote_node_context_resume(&idev->rnc,
- remote_device_resume_done,
- idev);
+ status = sci_remote_device_resume(idev, remote_device_resume_done,
+ idev);
if (status != SCI_SUCCESS)
return status;
@@ -1259,20 +1500,6 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
return status;
}
-void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
-{
- DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
-
- dev_dbg(&ihost->pdev->dev,
- "%s: idev = %p\n", __func__, idev);
-
- /* Cleanup all requests pending for this device. */
- isci_terminate_pending_requests(ihost, idev);
-
- dev_dbg(&ihost->pdev->dev,
- "%s: idev = %p, done\n", __func__, idev);
-}
-
/**
* This function builds the isci_remote_device when a libsas dev_found message
* is received.
@@ -1297,10 +1524,6 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
return NULL;
}
-
- if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
- return NULL;
-
if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
return NULL;
@@ -1342,14 +1565,8 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
spin_lock_irqsave(&ihost->scic_lock, flags);
idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
set_bit(IDEV_GONE, &idev->flags);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- /* Kill all outstanding requests. */
- isci_remote_device_nuke_requests(ihost, idev);
set_bit(IDEV_STOP_PENDING, &idev->flags);
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
status = sci_remote_device_stop(idev, 50);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
@@ -1359,6 +1576,9 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem
else
wait_for_device_stop(ihost, idev);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p, waiting done.\n", __func__, idev);
+
return status;
}
@@ -1434,3 +1654,73 @@ int isci_remote_device_found(struct domain_device *dev)
return status == SCI_SUCCESS ? 0 : -ENODEV;
}
+
+enum sci_status isci_remote_device_suspend_terminate(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ unsigned long flags;
+ enum sci_status status;
+
+ /* Put the device into suspension. */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags);
+ sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Terminate and wait for the completions. */
+ status = isci_remote_device_terminate_requests(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_remote_device_terminate_requests(%p) "
+ "returned %d!\n",
+ __func__, idev, status);
+
+ /* NOTE: RNC resumption is left to the caller! */
+ return status;
+}
+
+int isci_remote_device_is_safe_to_abort(
+ struct isci_remote_device *idev)
+{
+ return sci_remote_node_context_is_safe_to_abort(&idev->rnc);
+}
+
+enum sci_status sci_remote_device_abort_requests_pending_abort(
+ struct isci_remote_device *idev)
+{
+ return sci_remote_device_terminate_reqs_checkabort(idev, 1);
+}
+
+enum sci_status isci_remote_device_reset_complete(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ unsigned long flags;
+ enum sci_status status;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_reset_complete(idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return status;
+}
+
+void isci_dev_set_hang_detection_timeout(
+ struct isci_remote_device *idev,
+ u32 timeout)
+{
+ if (dev_is_sata(idev->domain_dev)) {
+ if (timeout) {
+ if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED,
+ &idev->flags))
+ return; /* Already enabled. */
+ } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED,
+ &idev->flags))
+ return; /* Not enabled. */
+
+ sci_port_set_hang_detection_timeout(idev->owning_port,
+ timeout);
+ }
+}
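Note: the suspend/terminate/resume helpers added above are meant to be composed by the error-handler paths (task abort, LUN reset) elsewhere in the driver. A minimal sketch of that composition is shown below; the function name is hypothetical and error handling is reduced to a single return code.

	/* Hypothetical error-handler flow (illustrative only): suspend the RNC,
	 * terminate the targeted request, then resume the RNC so normal
	 * operation can restart once libsas finishes its recovery.
	 */
	static int example_abort_one_request(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     struct isci_request *ireq)
	{
		enum sci_status status;

		/* Marks the abort path active, suspends the RNC and waits for
		 * the terminated request to complete (or time out).
		 */
		status = isci_remote_device_suspend_terminate(ihost, idev, ireq);

		/* RNC resumption is explicitly left to the caller. */
		isci_remote_device_resume_from_abort(ihost, idev);

		return status == SCI_SUCCESS ? 0 : -EIO;
	}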
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 58637ee..7674caa 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -85,27 +85,38 @@ struct isci_remote_device {
#define IDEV_GONE 3
#define IDEV_IO_READY 4
#define IDEV_IO_NCQERROR 5
+ #define IDEV_RNC_LLHANG_ENABLED 6
+ #define IDEV_ABORT_PATH_ACTIVE 7
+ #define IDEV_ABORT_PATH_RESUME_PENDING 8
unsigned long flags;
struct kref kref;
struct isci_port *isci_port;
struct domain_device *domain_dev;
struct list_head node;
- struct list_head reqs_in_process;
struct sci_base_state_machine sm;
u32 device_port_width;
enum sas_linkrate connection_rate;
- bool is_direct_attached;
struct isci_port *owning_port;
struct sci_remote_node_context rnc;
/* XXX unify with device reference counting and delete */
u32 started_request_count;
struct isci_request *working_request;
u32 not_ready_reason;
+ scics_sds_remote_node_context_callback abort_resume_cb;
+ void *abort_resume_cbparam;
};
#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
/* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_get_device(
+ struct isci_remote_device *idev)
+{
+ if (idev)
+ kref_get(&idev->kref);
+ return idev;
+}
+
static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
{
struct isci_remote_device *idev = dev->lldd_dev;
@@ -302,6 +313,8 @@ static inline void sci_remote_device_decrement_request_count(struct isci_remote_
idev->started_request_count--;
}
+void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout);
+
enum sci_status sci_remote_device_frame_handler(
struct isci_remote_device *idev,
u32 frame_index);
@@ -325,12 +338,50 @@ enum sci_status sci_remote_device_complete_io(
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_status sci_remote_device_suspend(
- struct isci_remote_device *idev,
- u32 suspend_type);
-
void sci_remote_device_post_request(
struct isci_remote_device *idev,
u32 request);
+enum sci_status sci_remote_device_terminate_requests(
+ struct isci_remote_device *idev);
+
+int isci_remote_device_is_safe_to_abort(
+ struct isci_remote_device *idev);
+
+enum sci_status
+sci_remote_device_abort_requests_pending_abort(
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status sci_remote_device_resume(
+ struct isci_remote_device *idev,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+
+enum sci_status isci_remote_device_resume_from_abort(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_reset_complete(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+enum sci_status isci_remote_device_suspend_terminate(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status isci_remote_device_terminate_requests(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+ enum sci_remote_node_suspension_reasons reason);
#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
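Note: isci_get_device() added above pairs with the driver's existing isci_put_device()/isci_lookup_device() reference helpers; per the comment in the header, the get side runs under the controller lock. A short illustrative pairing is below; the surrounding caller, the domain_device pointer "dev", and the locals are assumptions for illustration.

	/* Illustrative reference pairing: take a reference while holding
	 * ihost->scic_lock, use the device, then drop the reference.
	 */
	unsigned long flags;
	struct isci_remote_device *idev;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_get_device(dev->lldd_dev);
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (idev) {
		/* ... operate on idev ... */
		isci_put_device(idev);
	}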
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 3a94634..1910100 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -52,7 +52,7 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
+#include <scsi/sas_ata.h>
#include "host.h"
#include "isci.h"
#include "remote_device.h"
@@ -90,6 +90,15 @@ bool sci_remote_node_context_is_ready(
return false;
}
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc)
+{
+ u32 current_state = sci_rnc->sm.current_state_id;
+
+ if (current_state == SCI_RNC_TX_RX_SUSPENDED)
+ return true;
+ return false;
+}
+
static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
{
if (id < ihost->remote_node_entries &&
@@ -131,7 +140,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
rnc->ssp.arbitration_wait_time = 0;
- if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ if (dev_is_sata(dev)) {
rnc->ssp.connection_occupancy_timeout =
ihost->user_parameters.stp_max_occupancy_timeout;
rnc->ssp.connection_inactivity_timeout =
@@ -151,7 +160,6 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
rnc->ssp.oaf_source_zone_group = 0;
rnc->ssp.oaf_more_compatibility_features = 0;
}
-
/**
*
* @sci_rnc:
@@ -165,23 +173,30 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont
static void sci_remote_node_context_setup_to_resume(
struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
- void *callback_parameter)
+ void *callback_parameter,
+ enum sci_remote_node_context_destination_state dest_param)
{
- if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
- sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
- sci_rnc->user_callback = callback;
- sci_rnc->user_cookie = callback_parameter;
+ if (sci_rnc->destination_state != RNC_DEST_FINAL) {
+ sci_rnc->destination_state = dest_param;
+ if (callback != NULL) {
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+ }
}
}
-static void sci_remote_node_context_setup_to_destory(
+static void sci_remote_node_context_setup_to_destroy(
struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback callback,
void *callback_parameter)
{
- sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
+ struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc));
+
+ sci_rnc->destination_state = RNC_DEST_FINAL;
sci_rnc->user_callback = callback;
sci_rnc->user_cookie = callback_parameter;
+
+ wake_up(&ihost->eventq);
}
/**
@@ -203,9 +218,19 @@ static void sci_remote_node_context_notify_user(
static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
{
- if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+ switch (rnc->destination_state) {
+ case RNC_DEST_READY:
+ case RNC_DEST_SUSPENDED_RESUME:
+ rnc->destination_state = RNC_DEST_READY;
+ /* Fall through... */
+ case RNC_DEST_FINAL:
sci_remote_node_context_resume(rnc, rnc->user_callback,
- rnc->user_cookie);
+ rnc->user_cookie);
+ break;
+ default:
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
+ break;
+ }
}
static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
@@ -219,13 +244,12 @@ static void sci_remote_node_context_validate_context_buffer(struct sci_remote_no
rnc_buffer->ssp.is_valid = true;
- if (!idev->is_direct_attached &&
- (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
+ if (dev_is_sata(dev) && dev->parent) {
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
} else {
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
- if (idev->is_direct_attached)
+ if (!dev->parent)
sci_port_setup_transports(idev->owning_port,
sci_rnc->remote_node_index);
}
@@ -248,13 +272,18 @@ static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_
static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev = rnc_to_dev(rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
/* Check to see if we have gotten back to the initial state because
* someone requested to destroy the remote node context object.
*/
if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
- rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
sci_remote_node_context_notify_user(rnc);
+
+ smp_wmb();
+ wake_up(&ihost->eventq);
}
}
@@ -269,6 +298,8 @@ static void sci_remote_node_context_invalidating_state_enter(struct sci_base_sta
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ /* Terminate all outstanding requests. */
+ sci_remote_device_terminate_requests(rnc_to_dev(rnc));
sci_remote_node_context_invalidate_context_buffer(rnc);
}
@@ -287,10 +318,8 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m
* resume because of a target reset we also need to update
* the STPTLDARNI register with the RNi of the device
*/
- if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
- idev->is_direct_attached)
- sci_port_setup_transports(idev->owning_port,
- rnc->remote_node_index);
+ if (dev_is_sata(dev) && !dev->parent)
+ sci_port_setup_transports(idev->owning_port, rnc->remote_node_index);
sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
}
@@ -298,10 +327,22 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m
static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ enum sci_remote_node_context_destination_state dest_select;
+ int tell_user = 1;
+
+ dest_select = rnc->destination_state;
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
- rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+ if ((dest_select == RNC_DEST_SUSPENDED) ||
+ (dest_select == RNC_DEST_SUSPENDED_RESUME)) {
+ sci_remote_node_context_suspend(
+ rnc, rnc->suspend_reason,
+ SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT);
- if (rnc->user_callback)
+ if (dest_select == RNC_DEST_SUSPENDED_RESUME)
+ tell_user = 0; /* Wait until ready again. */
+ }
+ if (tell_user)
sci_remote_node_context_notify_user(rnc);
}
@@ -315,10 +356,34 @@ static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_sta
static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
{
struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev = rnc_to_dev(rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ u32 new_count = rnc->suspend_count + 1;
+
+ if (new_count == 0)
+ rnc->suspend_count = 1;
+ else
+ rnc->suspend_count = new_count;
+ smp_wmb();
+ /* Terminate outstanding requests pending abort. */
+ sci_remote_device_abort_requests_pending_abort(idev);
+
+ wake_up(&ihost->eventq);
sci_remote_node_context_continue_state_transitions(rnc);
}
+static void sci_remote_node_context_await_suspend_state_exit(
+ struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc
+ = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev = rnc_to_dev(rnc);
+
+ if (dev_is_sata(idev->domain_dev))
+ isci_dev_set_hang_detection_timeout(idev, 0);
+}
+
static const struct sci_base_state sci_remote_node_context_state_table[] = {
[SCI_RNC_INITIAL] = {
.enter_state = sci_remote_node_context_initial_state_enter,
@@ -341,7 +406,9 @@ static const struct sci_base_state sci_remote_node_context_state_table[] = {
[SCI_RNC_TX_RX_SUSPENDED] = {
.enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
},
- [SCI_RNC_AWAIT_SUSPENSION] = { },
+ [SCI_RNC_AWAIT_SUSPENSION] = {
+ .exit_state = sci_remote_node_context_await_suspend_state_exit,
+ },
};
void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
@@ -350,7 +417,7 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
memset(rnc, 0, sizeof(struct sci_remote_node_context));
rnc->remote_node_index = remote_node_index;
- rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+ rnc->destination_state = RNC_DEST_UNSPECIFIED;
sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
}
@@ -359,6 +426,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
u32 event_code)
{
enum scis_sds_remote_node_context_states state;
+ u32 next_state;
state = sci_rnc->sm.current_state_id;
switch (state) {
@@ -373,18 +441,18 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
break;
case SCI_RNC_INVALIDATING:
if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
- if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
- state = SCI_RNC_INITIAL;
+ if (sci_rnc->destination_state == RNC_DEST_FINAL)
+ next_state = SCI_RNC_INITIAL;
else
- state = SCI_RNC_POSTING;
- sci_change_state(&sci_rnc->sm, state);
+ next_state = SCI_RNC_POSTING;
+ sci_change_state(&sci_rnc->sm, next_state);
} else {
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
/* We really dont care if the hardware is going to suspend
* the device since it's being invalidated anyway */
- dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
"suspeneded by hardware while being "
"invalidated.\n", __func__, sci_rnc);
@@ -403,7 +471,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
/* We really dont care if the hardware is going to suspend
* the device since it's being resumed anyway */
- dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: SCIC Remote Node Context 0x%p was "
"suspeneded by hardware while being resumed.\n",
__func__, sci_rnc);
@@ -417,11 +485,11 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TL_RNC_SUSPEND_TX:
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
- sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ sci_rnc->suspend_type = scu_get_event_type(event_code);
break;
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
- sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ sci_rnc->suspend_type = scu_get_event_type(event_code);
break;
default:
goto out;
@@ -430,27 +498,29 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con
case SCI_RNC_AWAIT_SUSPENSION:
switch (scu_get_event_type(event_code)) {
case SCU_EVENT_TL_RNC_SUSPEND_TX:
- sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
- sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ next_state = SCI_RNC_TX_SUSPENDED;
break;
case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
- sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
- sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ next_state = SCI_RNC_TX_RX_SUSPENDED;
break;
default:
goto out;
}
+ if (sci_rnc->suspend_type == scu_get_event_type(event_code))
+ sci_change_state(&sci_rnc->sm, next_state);
break;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state: %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
return SCI_SUCCESS;
out:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: code: %#x state: %d\n", __func__, event_code, state);
+ "%s: code: %#x state: %s\n", __func__, event_code,
+ rnc_state_name(state));
return SCI_FAILURE;
}
@@ -464,20 +534,23 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
state = sci_rnc->sm.current_state_id;
switch (state) {
case SCI_RNC_INVALIDATING:
- sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
return SCI_SUCCESS;
case SCI_RNC_POSTING:
case SCI_RNC_RESUMING:
case SCI_RNC_READY:
case SCI_RNC_TX_SUSPENDED:
case SCI_RNC_TX_RX_SUSPENDED:
- case SCI_RNC_AWAIT_SUSPENSION:
- sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
return SCI_SUCCESS;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
case SCI_RNC_INITIAL:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state: %s\n", __func__,
+ rnc_state_name(state));
/* We have decided that the destruct request on the remote node context
* can not fail since it is either in the initial/destroyed state or is
* can be destroyed.
@@ -485,35 +558,101 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
-enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
- u32 suspend_type,
- scics_sds_remote_node_context_callback cb_fn,
- void *cb_p)
+enum sci_status sci_remote_node_context_suspend(
+ struct sci_remote_node_context *sci_rnc,
+ enum sci_remote_node_suspension_reasons suspend_reason,
+ u32 suspend_type)
{
- enum scis_sds_remote_node_context_states state;
+ enum scis_sds_remote_node_context_states state
+ = sci_rnc->sm.current_state_id;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ enum sci_status status = SCI_FAILURE_INVALID_STATE;
+ enum sci_remote_node_context_destination_state dest_param =
+ RNC_DEST_UNSPECIFIED;
+
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: current state %s, current suspend_type %x dest state %d,"
+ " arg suspend_reason %d, arg suspend_type %x",
+ __func__, rnc_state_name(state), sci_rnc->suspend_type,
+ sci_rnc->destination_state, suspend_reason,
+ suspend_type);
+
+ /* Disable automatic state continuations if explicitly suspending. */
+ if ((suspend_reason == SCI_HW_SUSPEND) ||
+ (sci_rnc->destination_state == RNC_DEST_FINAL))
+ dest_param = sci_rnc->destination_state;
- state = sci_rnc->sm.current_state_id;
- if (state != SCI_RNC_READY) {
+ switch (state) {
+ case SCI_RNC_READY:
+ break;
+ case SCI_RNC_INVALIDATING:
+ if (sci_rnc->destination_state == RNC_DEST_FINAL) {
+ dev_warn(scirdev_to_dev(idev),
+ "%s: already destroying %p\n",
+ __func__, sci_rnc);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ /* Fall through and handle like SCI_RNC_POSTING */
+ case SCI_RNC_RESUMING:
+ /* Fall through and handle like SCI_RNC_POSTING */
+ case SCI_RNC_POSTING:
+ /* Set the destination state to AWAIT - this signals the
+ * entry into the SCI_RNC_READY state that a suspension
+ * needs to be done immediately.
+ */
+ if (sci_rnc->destination_state != RNC_DEST_FINAL)
+ sci_rnc->destination_state = RNC_DEST_SUSPENDED;
+ sci_rnc->suspend_type = suspend_type;
+ sci_rnc->suspend_reason = suspend_reason;
+ return SCI_SUCCESS;
+
+ case SCI_RNC_TX_SUSPENDED:
+ if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX)
+ status = SCI_SUCCESS;
+ break;
+ case SCI_RNC_TX_RX_SUSPENDED:
+ if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+ status = SCI_SUCCESS;
+ break;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX)
+ || (suspend_type == sci_rnc->suspend_type))
+ return SCI_SUCCESS;
+ break;
+ default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
+ sci_rnc->destination_state = dest_param;
+ sci_rnc->suspend_type = suspend_type;
+ sci_rnc->suspend_reason = suspend_reason;
+
+ if (status == SCI_SUCCESS) { /* Already in the destination state? */
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ wake_up_all(&ihost->eventq); /* Let observers look. */
+ return SCI_SUCCESS;
+ }
+ if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) ||
+ (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) {
- sci_rnc->user_callback = cb_fn;
- sci_rnc->user_cookie = cb_p;
- sci_rnc->suspension_code = suspend_type;
+ if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)
+ isci_dev_set_hang_detection_timeout(idev, 0x00000001);
- if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
- sci_remote_device_post_request(rnc_to_dev(sci_rnc),
- SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
+ sci_remote_device_post_request(
+ idev, SCI_SOFTWARE_SUSPEND_CMD);
}
+ if (state != SCI_RNC_AWAIT_SUSPENSION)
+ sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
- sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
return SCI_SUCCESS;
}
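
The suspend routine above boils down to classifying the current RNC state: READY suspends immediately, POSTING/INVALIDATING/RESUMING defer the suspension by recording a destination state, the two suspended states report success when the requested type already holds, and an INVALIDATING node that is being destroyed is rejected. The standalone sketch below models that decision; it is not part of this patch, and the enum and struct names are simplified stand-ins for the driver's types.

/* Simplified userspace model of the suspend decision (illustrative only). */
#include <stdio.h>

enum state { READY, POSTING, INVALIDATING, RESUMING,
	     TX_SUSPENDED, TX_RX_SUSPENDED, AWAIT_SUSPENSION };
enum dest  { DEST_UNSPECIFIED, DEST_READY, DEST_FINAL, DEST_SUSPENDED };
enum kind  { SUSPEND_TX, SUSPEND_TX_RX };

struct rnc {
	enum state state;
	enum dest  dest;
	enum kind  suspend_type;
};

/* Returns 0 when the suspension is accepted, -1 when it is invalid. */
static int model_suspend(struct rnc *rnc, enum kind type)
{
	switch (rnc->state) {
	case READY:
		break;				/* suspend right away */
	case INVALIDATING:
		if (rnc->dest == DEST_FINAL)
			return -1;		/* node is being destroyed */
		/* fall through */
	case RESUMING:
	case POSTING:
		/* Defer: record that a suspension must follow the post. */
		rnc->dest = DEST_SUSPENDED;
		rnc->suspend_type = type;
		return 0;
	case TX_SUSPENDED:
		if (type == SUSPEND_TX)
			return 0;		/* already suspended as asked */
		break;
	case TX_RX_SUSPENDED:
		if (type == SUSPEND_TX_RX)
			return 0;
		break;
	case AWAIT_SUSPENSION:
		return 0;			/* a suspension is already pending */
	}
	rnc->suspend_type = type;
	rnc->state = AWAIT_SUSPENSION;		/* wait for the hardware event */
	return 0;
}

int main(void)
{
	struct rnc rnc = { .state = POSTING, .dest = DEST_READY };

	printf("suspend while posting: %d (dest=%d)\n",
	       model_suspend(&rnc, SUSPEND_TX_RX), (int)rnc.dest);
	return 0;
}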
@@ -522,56 +661,86 @@ enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *s
void *cb_p)
{
enum scis_sds_remote_node_context_states state;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
state = sci_rnc->sm.current_state_id;
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; "
+ "dev resume path %s\n",
+ __func__, rnc_state_name(state), cb_fn, cb_p,
+ sci_rnc->destination_state,
+ test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)
+ ? "<abort active>" : "<normal>");
+
switch (state) {
case SCI_RNC_INITIAL:
if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
return SCI_FAILURE_INVALID_STATE;
- sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
- sci_remote_node_context_construct_buffer(sci_rnc);
- sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p,
+ RNC_DEST_READY);
+ if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+ sci_remote_node_context_construct_buffer(sci_rnc);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+ }
return SCI_SUCCESS;
+
case SCI_RNC_POSTING:
case SCI_RNC_INVALIDATING:
case SCI_RNC_RESUMING:
- if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
- return SCI_FAILURE_INVALID_STATE;
-
- sci_rnc->user_callback = cb_fn;
- sci_rnc->user_cookie = cb_p;
+ /* We are still waiting to post when a resume was
+ * requested.
+ */
+ switch (sci_rnc->destination_state) {
+ case RNC_DEST_SUSPENDED:
+ case RNC_DEST_SUSPENDED_RESUME:
+ /* Previously waiting to suspend after posting.
+ * Now continue onto resumption.
+ */
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p,
+ RNC_DEST_SUSPENDED_RESUME);
+ break;
+ default:
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p,
+ RNC_DEST_READY);
+ break;
+ }
return SCI_SUCCESS;
- case SCI_RNC_TX_SUSPENDED: {
- struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
- struct domain_device *dev = idev->domain_dev;
-
- sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
-
- /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
- if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
- sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
- else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
- if (idev->is_direct_attached) {
- /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
- sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
- } else {
- sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ {
+ struct domain_device *dev = idev->domain_dev;
+ /* If this is an expander attached SATA device we must
+ * invalidate and repost the RNC since this is the only
+ * way to clear the TCi to NCQ tag mapping table for
+ * the RNi. All other device types we can just resume.
+ */
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p, RNC_DEST_READY);
+
+ if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) {
+ if ((dev_is_sata(dev) && dev->parent) ||
+ (sci_rnc->destination_state == RNC_DEST_FINAL))
+ sci_change_state(&sci_rnc->sm,
+ SCI_RNC_INVALIDATING);
+ else
+ sci_change_state(&sci_rnc->sm,
+ SCI_RNC_RESUMING);
}
- } else
- return SCI_FAILURE;
+ }
return SCI_SUCCESS;
- }
- case SCI_RNC_TX_RX_SUSPENDED:
- sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
- sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
- return SCI_FAILURE_INVALID_STATE;
+
case SCI_RNC_AWAIT_SUSPENSION:
- sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_setup_to_resume(
+ sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME);
return SCI_SUCCESS;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_INVALID_STATE;
}
}
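
In the resume path above, the interesting branch is the suspended states: an expander-attached SATA device must go through SCI_RNC_INVALIDATING so the RNC is reposted and its TCi-to-NCQ-tag mapping cleared, anything else resumes directly, and both are deferred while the abort path owns the device. A minimal model of that decision, with illustrative names rather than the driver's types:

/* Toy model of the resume decision for a suspended RNC (not driver code). */
#include <stdbool.h>
#include <stdio.h>

enum next { GO_RESUMING, GO_INVALIDATING, DEFER_TO_ABORT_PATH };

static enum next resume_from_suspended(bool is_sata, bool has_parent,
				       bool abort_path_active, bool dest_final)
{
	if (abort_path_active)
		return DEFER_TO_ABORT_PATH;	/* the abort path drives the RNC */
	if ((is_sata && has_parent) || dest_final)
		return GO_INVALIDATING;		/* repost to clear the NCQ tag map */
	return GO_RESUMING;
}

int main(void)
{
	/* Expander-attached SATA: invalidate and repost. */
	printf("%d\n", resume_from_suspended(true, true, false, false));
	/* Direct-attached SAS end device: plain resume. */
	printf("%d\n", resume_from_suspended(false, false, false, false));
	return 0;
}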
@@ -590,35 +759,51 @@ enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context
case SCI_RNC_TX_RX_SUSPENDED:
case SCI_RNC_AWAIT_SUSPENSION:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: invalid state %d\n", __func__, state);
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
default:
- break;
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %s\n", __func__,
+ rnc_state_name(state));
+ return SCI_FAILURE_INVALID_STATE;
}
- dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
- "%s: requested to start IO while still resuming, %d\n",
- __func__, state);
- return SCI_FAILURE_INVALID_STATE;
}
-enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
- struct isci_request *ireq)
+enum sci_status sci_remote_node_context_start_task(
+ struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum sci_status status = sci_remote_node_context_resume(sci_rnc,
+ cb_fn, cb_p);
+ if (status != SCI_SUCCESS)
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: resume failed: %d\n", __func__, status);
+ return status;
+}
+
+int sci_remote_node_context_is_safe_to_abort(
+ struct sci_remote_node_context *sci_rnc)
{
enum scis_sds_remote_node_context_states state;
state = sci_rnc->sm.current_state_id;
switch (state) {
+ case SCI_RNC_INVALIDATING:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ return 1;
+ case SCI_RNC_POSTING:
case SCI_RNC_RESUMING:
case SCI_RNC_READY:
- case SCI_RNC_AWAIT_SUSPENSION:
- return SCI_SUCCESS;
case SCI_RNC_TX_SUSPENDED:
- case SCI_RNC_TX_RX_SUSPENDED:
- sci_remote_node_context_resume(sci_rnc, NULL, NULL);
- return SCI_SUCCESS;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ case SCI_RNC_INITIAL:
+ return 0;
default:
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
"%s: invalid state %d\n", __func__, state);
- return SCI_FAILURE_INVALID_STATE;
+ return 0;
}
}
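
The new sci_remote_node_context_is_safe_to_abort() predicate encodes when outstanding I/O may be terminated without risking the hardware hang described in the merge text. A compact restatement, assuming simplified state names:

/* Minimal model of the "safe to abort" test (names are illustrative). */
#include <stdbool.h>
#include <stdio.h>

enum state { INITIAL, POSTING, INVALIDATING, READY, RESUMING,
	     TX_SUSPENDED, TX_RX_SUSPENDED, AWAIT_SUSPENSION };

/* Termination is only safe once the hardware can no longer open a
 * connection on the node: while it is being invalidated, or while both
 * transmit and receive are suspended.  Every other state requires the
 * caller to suspend the RNC first. */
static bool safe_to_abort(enum state s)
{
	return s == INVALIDATING || s == TX_RX_SUSPENDED;
}

int main(void)
{
	printf("%d %d\n", safe_to_abort(TX_SUSPENDED), safe_to_abort(TX_RX_SUSPENDED));
	return 0;
}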
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index a241e0f..a703b9c 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -75,8 +75,13 @@
*/
#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
-#define SCU_HARDWARE_SUSPENSION (0)
-#define SCI_SOFTWARE_SUSPENSION (1)
+enum sci_remote_node_suspension_reasons {
+ SCI_HW_SUSPEND,
+ SCI_SW_SUSPEND_NORMAL,
+ SCI_SW_SUSPEND_LINKHANG_DETECT
+};
+#define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX
+#define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX
struct isci_request;
struct isci_remote_device;
@@ -137,9 +142,13 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
* node context.
*/
enum sci_remote_node_context_destination_state {
- SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
- SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
- SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
+ RNC_DEST_UNSPECIFIED,
+ RNC_DEST_READY,
+ RNC_DEST_FINAL,
+ RNC_DEST_SUSPENDED, /* Set when suspend during post/invalidate */
+ RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting
+ * or invalidating and already suspending.
+ */
};
/**
@@ -156,10 +165,12 @@ struct sci_remote_node_context {
u16 remote_node_index;
/**
- * This field is the recored suspension code or the reason for the remote node
+ * This field is the recorded suspension type of the remote node
+ * This field is the recorded suspension type of the remote node
* context suspension.
*/
- u32 suspension_code;
+ u32 suspend_type;
+ enum sci_remote_node_suspension_reasons suspend_reason;
+ u32 suspend_count;
/**
* This field is true if the remote node context is resuming from its current
@@ -193,6 +204,8 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
bool sci_remote_node_context_is_ready(
struct sci_remote_node_context *sci_rnc);
+bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc);
+
enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
u32 event_code);
enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
@@ -200,14 +213,24 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context
void *callback_parameter);
enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
u32 suspend_type,
- scics_sds_remote_node_context_callback cb_fn,
- void *cb_p);
+ u32 suspension_code);
enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
scics_sds_remote_node_context_callback cb_fn,
void *cb_p);
enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
- struct isci_request *ireq);
+ struct isci_request *ireq,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
struct isci_request *ireq);
+int sci_remote_node_context_is_safe_to_abort(
+ struct sci_remote_node_context *sci_rnc);
+static inline bool sci_remote_node_context_is_being_destroyed(
+ struct sci_remote_node_context *sci_rnc)
+{
+ return (sci_rnc->destination_state == RNC_DEST_FINAL)
+ || ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL)
+ && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED));
+}
#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
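
The header now records why a suspension was requested rather than only whether it was hardware or software initiated; both software flavors post the same TX/RX suspend command, and the link-hang flavor additionally arms hang detection first. A rough sketch of that mapping (the helper and output strings here are hypothetical, not driver API):

/* Sketch of how a suspend reason might drive the posted command and
 * hang-detection setup; helper names are placeholders, not kernel calls. */
#include <stdio.h>

enum reason { HW_SUSPEND, SW_SUSPEND_NORMAL, SW_SUSPEND_LINKHANG_DETECT };

static void post_suspend(enum reason why)
{
	if (why == HW_SUSPEND)
		return;	/* hardware already suspended the node; just record it */

	if (why == SW_SUSPEND_LINKHANG_DETECT)
		printf("arm hang-detection timeout\n");

	/* Both software flavors post the same TX/RX suspend command. */
	printf("post RNC_SUSPEND_TX_RX\n");
}

int main(void)
{
	post_suspend(SW_SUSPEND_LINKHANG_DETECT);
	return 0;
}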
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 2def1e3..7a0431c 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -92,11 +92,11 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
if (idx == 0) {
offset = (void *) &ireq->tc->sgl_pair_ab -
(void *) &ihost->task_context_table[0];
- return ihost->task_context_dma + offset;
+ return ihost->tc_dma + offset;
} else if (idx == 1) {
offset = (void *) &ireq->tc->sgl_pair_cd -
(void *) &ihost->task_context_table[0];
- return ihost->task_context_dma + offset;
+ return ihost->tc_dma + offset;
}
return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
@@ -730,7 +730,7 @@ static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *i
{
struct sas_task *task = isci_request_access_task(ireq);
- ireq->protocol = SCIC_SSP_PROTOCOL;
+ ireq->protocol = SAS_PROTOCOL_SSP;
scu_ssp_io_request_construct_task_context(ireq,
task->data_dir,
@@ -763,7 +763,7 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
bool copy = false;
struct sas_task *task = isci_request_access_task(ireq);
- ireq->protocol = SCIC_STP_PROTOCOL;
+ ireq->protocol = SAS_PROTOCOL_STP;
copy = (task->data_dir == DMA_NONE) ? false : true;
@@ -863,6 +863,8 @@ sci_io_request_terminate(struct isci_request *ireq)
switch (state) {
case SCI_REQ_CONSTRUCTED:
+ /* Set to make sure no HW terminate posting is done: */
+ set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
@@ -883,8 +885,7 @@ sci_io_request_terminate(struct isci_request *ireq)
case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
case SCI_REQ_ATAPI_WAIT_D2H:
case SCI_REQ_ATAPI_WAIT_TC_COMP:
- sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
- return SCI_SUCCESS;
+ /* Fall through and change state to ABORTING... */
case SCI_REQ_TASK_WAIT_TC_RESP:
/* The task frame was already confirmed to have been
* sent by the SCU HW. Since the state machine is
@@ -893,20 +894,21 @@ sci_io_request_terminate(struct isci_request *ireq)
* and don't wait for the task response.
*/
sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
- sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
- return SCI_SUCCESS;
+ /* Fall through and handle like ABORTING... */
case SCI_REQ_ABORTING:
- /* If a request has a termination requested twice, return
- * a failure indication, since HW confirmation of the first
- * abort is still outstanding.
+ if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
+ set_bit(IREQ_PENDING_ABORT, &ireq->flags);
+ else
+ clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
+ /* If the request is only waiting on the remote device
+ * suspension, return SUCCESS so the caller will wait too.
*/
+ return SCI_SUCCESS;
case SCI_REQ_COMPLETED:
default:
dev_warn(&ireq->owning_controller->pdev->dev,
"%s: SCIC IO Request requested to abort while in wrong "
- "state %d\n",
- __func__,
- ireq->sm.current_state_id);
+ "state %d\n", __func__, ireq->sm.current_state_id);
break;
}
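
With this change a second terminate call no longer fails; while ABORTING it only records, via IREQ_PENDING_ABORT, whether the remote device still needs to be suspended before the abort can proceed, and returns success so the caller waits. A toy model of that disposition, using an illustrative flag bit:

/* Sketch of the terminate disposition while ABORTING (names illustrative). */
#include <stdbool.h>
#include <stdio.h>

static void terminate_while_aborting(unsigned long *flags, bool device_safe)
{
	enum { PENDING_ABORT = 1 << 0 };

	if (!device_safe)
		*flags |= PENDING_ABORT;	/* caller must wait for suspension */
	else
		*flags &= ~PENDING_ABORT;
	/* Either way the call reports success so the caller waits. */
}

int main(void)
{
	unsigned long flags = 0;

	terminate_while_aborting(&flags, false);
	printf("pending abort: %s\n", (flags & 1) ? "yes" : "no");
	return 0;
}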
@@ -1070,7 +1072,7 @@ request_started_state_tc_event(struct isci_request *ireq,
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
- if (ireq->protocol == SCIC_STP_PROTOCOL) {
+ if (ireq->protocol == SAS_PROTOCOL_STP) {
ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
SCU_COMPLETION_TL_STATUS_SHIFT;
ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
@@ -2117,7 +2119,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
*/
if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
sci_remote_device_suspend(ireq->target_device,
- SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+ SCI_SW_SUSPEND_NORMAL);
ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
@@ -2138,13 +2140,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
/* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
* - this comes only for B0
*/
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
- case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
- sci_remote_device_suspend(ireq->target_device,
- SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
- /* Fall through to the default case */
default:
/* All other completion status cause the IO to be complete. */
ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
@@ -2262,15 +2257,151 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire
return status;
}
+static int sci_request_smp_completion_status_is_tx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+ return 1;
+ }
+ return 0;
+}
+
+static int sci_request_smp_completion_status_is_tx_rx_suspend(
+ unsigned int completion_status)
+{
+ return 0; /* There are no Tx/Rx SMP suspend conditions. */
+}
+
+static int sci_request_ssp_completion_status_is_tx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+ return 1;
+ }
+ return 0;
+}
+
+static int sci_request_ssp_completion_status_is_tx_rx_suspend(
+ unsigned int completion_status)
+{
+ return 0; /* There are no Tx/Rx SSP suspend conditions. */
+}
+
+static int sci_request_stpsata_completion_status_is_tx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_DONE_TX_RAW_CMD_ERR:
+ case SCU_TASK_DONE_LL_R_ERR:
+ case SCU_TASK_DONE_LL_PERR:
+ case SCU_TASK_DONE_REG_ERR:
+ case SCU_TASK_DONE_SDB_ERR:
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+ return 1;
+ }
+ return 0;
+}
+
+
+static int sci_request_stpsata_completion_status_is_tx_rx_suspend(
+ unsigned int completion_status)
+{
+ switch (completion_status) {
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_DONE_LL_SY_TERM:
+ case SCU_TASK_DONE_LL_LF_TERM:
+ case SCU_TASK_DONE_BREAK_RCVD:
+ case SCU_TASK_DONE_INV_FIS_LEN:
+ case SCU_TASK_DONE_UNEXP_FIS:
+ case SCU_TASK_DONE_UNEXP_SDBFIS:
+ case SCU_TASK_DONE_MAX_PLD_ERR:
+ return 1;
+ }
+ return 0;
+}
+
+static void sci_request_handle_suspending_completions(
+ struct isci_request *ireq,
+ u32 completion_code)
+{
+ int is_tx = 0;
+ int is_tx_rx = 0;
+
+ switch (ireq->protocol) {
+ case SAS_PROTOCOL_SMP:
+ is_tx = sci_request_smp_completion_status_is_tx_suspend(
+ completion_code);
+ is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
+ completion_code);
+ break;
+ case SAS_PROTOCOL_SSP:
+ is_tx = sci_request_ssp_completion_status_is_tx_suspend(
+ completion_code);
+ is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
+ completion_code);
+ break;
+ case SAS_PROTOCOL_STP:
+ is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
+ completion_code);
+ is_tx_rx =
+ sci_request_stpsata_completion_status_is_tx_rx_suspend(
+ completion_code);
+ break;
+ default:
+ dev_warn(&ireq->isci_host->pdev->dev,
+ "%s: request %p has no valid protocol\n",
+ __func__, ireq);
+ break;
+ }
+ if (is_tx || is_tx_rx) {
+ BUG_ON(is_tx && is_tx_rx);
+
+ sci_remote_node_context_suspend(
+ &ireq->target_device->rnc,
+ SCI_HW_SUSPEND,
+ (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
+ : SCU_EVENT_TL_RNC_SUSPEND_TX);
+ }
+}
+
enum sci_status
sci_io_request_tc_completion(struct isci_request *ireq,
- u32 completion_code)
+ u32 completion_code)
{
enum sci_base_request_states state;
struct isci_host *ihost = ireq->owning_controller;
state = ireq->sm.current_state_id;
+ /* Decode those completions that signal upcoming suspension events. */
+ sci_request_handle_suspending_completions(
+ ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code));
+
switch (state) {
case SCI_REQ_STARTED:
return request_started_state_tc_event(ireq, completion_code);
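
Every task-context completion is now decoded up front so that statuses the hardware follows with a TX or TX/RX suspension immediately mark the RNC as suspending, before the per-state handler runs. The userspace model below mirrors the per-protocol classification for a few representative statuses; the names and the status subset are simplified, not the SCU definitions:

/* Userspace model of the suspending-completion decode (illustrative only). */
#include <stdio.h>

enum proto  { SMP, SSP, STP };
enum status { DONE_GOOD, OPEN_REJECT_BAD_DESTINATION, DONE_REG_ERR, DONE_UNEXP_FIS };
enum susp   { SUSP_NONE, SUSP_TX, SUSP_TX_RX };

/* Per-protocol classifiers: which completion statuses the hardware follows
 * with a TX-only or a TX/RX suspension of the remote node. */
static enum susp classify(enum proto p, enum status s)
{
	switch (p) {
	case SMP:
	case SSP:
		return s == OPEN_REJECT_BAD_DESTINATION ? SUSP_TX : SUSP_NONE;
	case STP:
		if (s == DONE_UNEXP_FIS)
			return SUSP_TX_RX;
		if (s == DONE_REG_ERR || s == OPEN_REJECT_BAD_DESTINATION)
			return SUSP_TX;
		return SUSP_NONE;
	}
	return SUSP_NONE;
}

int main(void)
{
	if (classify(STP, DONE_UNEXP_FIS) == SUSP_TX_RX)
		printf("suspend TX/RX before terminating outstanding I/O\n");
	return 0;
}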
@@ -2362,9 +2493,6 @@ static void isci_request_process_response_iu(
* @request: This parameter is the completed isci_request object.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
- * @complete_to_host_ptr: This parameter specifies the action to be taken by
- * the LLDD with respect to completing this request or forcing an abort
- * condition on the I/O.
* @open_rej_reason: This parameter specifies the encoded reason for the
* abandon-class reject.
*
@@ -2375,14 +2503,12 @@ static void isci_request_set_open_reject_status(
struct sas_task *task,
enum service_response *response_ptr,
enum exec_status *status_ptr,
- enum isci_completion_selection *complete_to_host_ptr,
enum sas_open_rej_reason open_rej_reason)
{
/* Task in the target is done. */
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
*response_ptr = SAS_TASK_UNDELIVERED;
*status_ptr = SAS_OPEN_REJECT;
- *complete_to_host_ptr = isci_perform_normal_io_completion;
task->task_status.open_rej_reason = open_rej_reason;
}
@@ -2392,9 +2518,6 @@ static void isci_request_set_open_reject_status(
* @request: This parameter is the completed isci_request object.
* @response_ptr: This parameter specifies the service response for the I/O.
* @status_ptr: This parameter specifies the exec status for the I/O.
- * @complete_to_host_ptr: This parameter specifies the action to be taken by
- * the LLDD with respect to completing this request or forcing an abort
- * condition on the I/O.
*
* none.
*/
@@ -2403,8 +2526,7 @@ static void isci_request_handle_controller_specific_errors(
struct isci_request *request,
struct sas_task *task,
enum service_response *response_ptr,
- enum exec_status *status_ptr,
- enum isci_completion_selection *complete_to_host_ptr)
+ enum exec_status *status_ptr)
{
unsigned int cstatus;
@@ -2445,9 +2567,6 @@ static void isci_request_handle_controller_specific_errors(
*status_ptr = SAS_ABORTED_TASK;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr =
- isci_perform_normal_io_completion;
} else {
/* Task in the target is not done. */
*response_ptr = SAS_TASK_UNDELIVERED;
@@ -2458,9 +2577,6 @@ static void isci_request_handle_controller_specific_errors(
*status_ptr = SAM_STAT_TASK_ABORTED;
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr =
- isci_perform_error_io_completion;
}
break;
@@ -2489,8 +2605,6 @@ static void isci_request_handle_controller_specific_errors(
*status_ptr = SAS_ABORTED_TASK;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr = isci_perform_normal_io_completion;
break;
@@ -2501,7 +2615,7 @@ static void isci_request_handle_controller_specific_errors(
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
+ SAS_OREJ_WRONG_DEST);
break;
case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
@@ -2511,56 +2625,56 @@ static void isci_request_handle_controller_specific_errors(
*/
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_RESV_AB0);
+ SAS_OREJ_RESV_AB0);
break;
case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_RESV_AB1);
+ SAS_OREJ_RESV_AB1);
break;
case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_RESV_AB2);
+ SAS_OREJ_RESV_AB2);
break;
case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_RESV_AB3);
+ SAS_OREJ_RESV_AB3);
break;
case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_BAD_DEST);
+ SAS_OREJ_BAD_DEST);
break;
case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_STP_NORES);
+ SAS_OREJ_STP_NORES);
break;
case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_EPROTO);
+ SAS_OREJ_EPROTO);
break;
case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
isci_request_set_open_reject_status(
request, task, response_ptr, status_ptr,
- complete_to_host_ptr, SAS_OREJ_CONN_RATE);
+ SAS_OREJ_CONN_RATE);
break;
case SCU_TASK_DONE_LL_R_ERR:
@@ -2592,95 +2706,12 @@ static void isci_request_handle_controller_specific_errors(
*response_ptr = SAS_TASK_UNDELIVERED;
*status_ptr = SAM_STAT_TASK_ABORTED;
- if (task->task_proto == SAS_PROTOCOL_SMP) {
+ if (task->task_proto == SAS_PROTOCOL_SMP)
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr = isci_perform_normal_io_completion;
- } else {
+ else
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
-
- *complete_to_host_ptr = isci_perform_error_io_completion;
- }
- break;
- }
-}
-
-/**
- * isci_task_save_for_upper_layer_completion() - This function saves the
- * request for later completion to the upper layer driver.
- * @host: This parameter is a pointer to the host on which the the request
- * should be queued (either as an error or success).
- * @request: This parameter is the completed request.
- * @response: This parameter is the response code for the completed task.
- * @status: This parameter is the status code for the completed task.
- *
- * none.
- */
-static void isci_task_save_for_upper_layer_completion(
- struct isci_host *host,
- struct isci_request *request,
- enum service_response response,
- enum exec_status status,
- enum isci_completion_selection task_notification_selection)
-{
- struct sas_task *task = isci_request_access_task(request);
-
- task_notification_selection
- = isci_task_set_completion_status(task, response, status,
- task_notification_selection);
-
- /* Tasks aborted specifically by a call to the lldd_abort_task
- * function should not be completed to the host in the regular path.
- */
- switch (task_notification_selection) {
-
- case isci_perform_normal_io_completion:
- /* Normal notification (task_done) */
-
- /* Add to the completed list. */
- list_add(&request->completed_node,
- &host->requests_to_complete);
-
- /* Take the request off the device's pending request list. */
- list_del_init(&request->dev_node);
- break;
-
- case isci_perform_aborted_io_completion:
- /* No notification to libsas because this request is
- * already in the abort path.
- */
- /* Wake up whatever process was waiting for this
- * request to complete.
- */
- WARN_ON(request->io_request_completion == NULL);
-
- if (request->io_request_completion != NULL) {
-
- /* Signal whoever is waiting that this
- * request is complete.
- */
- complete(request->io_request_completion);
- }
- break;
-
- case isci_perform_error_io_completion:
- /* Use sas_task_abort */
- /* Add to the aborted list. */
- list_add(&request->completed_node,
- &host->requests_to_errorback);
- break;
-
- default:
- /* Add to the error to libsas list. */
- list_add(&request->completed_node,
- &host->requests_to_errorback);
break;
}
- dev_dbg(&host->pdev->dev,
- "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n",
- __func__, task_notification_selection, task,
- (task) ? task->task_status.resp : 0, response,
- (task) ? task->task_status.stat : 0, status);
}
static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis)
@@ -2715,295 +2746,164 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
struct isci_remote_device *idev = request->target_device;
enum service_response response = SAS_TASK_UNDELIVERED;
enum exec_status status = SAS_ABORTED_TASK;
- enum isci_request_status request_status;
- enum isci_completion_selection complete_to_host
- = isci_perform_normal_io_completion;
dev_dbg(&ihost->pdev->dev,
- "%s: request = %p, task = %p,\n"
+ "%s: request = %p, task = %p, "
"task->data_dir = %d completion_status = 0x%x\n",
- __func__,
- request,
- task,
- task->data_dir,
- completion_status);
+ __func__, request, task, task->data_dir, completion_status);
- spin_lock(&request->state_lock);
- request_status = request->status;
+ /* The request is done from an SCU HW perspective. */
- /* Decode the request status. Note that if the request has been
- * aborted by a task management function, we don't care
- * what the status is.
- */
- switch (request_status) {
-
- case aborted:
- /* "aborted" indicates that the request was aborted by a task
- * management function, since once a task management request is
- * perfomed by the device, the request only completes because
- * of the subsequent driver terminate.
- *
- * Aborted also means an external thread is explicitly managing
- * this request, so that we do not complete it up the stack.
- *
- * The target is still there (since the TMF was successful).
- */
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- response = SAS_TASK_COMPLETE;
+ /* This is an active request being completed from the core. */
+ switch (completion_status) {
- /* See if the device has been/is being stopped. Note
- * that we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
- if (!idev)
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_ABORTED_TASK;
+ case SCI_IO_FAILURE_RESPONSE_VALID:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+ __func__, request, task);
+
+ if (sas_protocol_ata(task->task_proto)) {
+ isci_process_stp_response(task, &request->stp.rsp);
+ } else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+ /* crack the iu response buffer. */
+ resp_iu = &request->ssp.rsp;
+ isci_request_process_response_iu(task, resp_iu,
+ &ihost->pdev->dev);
+
+ } else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+ dev_err(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+ "SAS_PROTOCOL_SMP protocol\n",
+ __func__);
- complete_to_host = isci_perform_aborted_io_completion;
- /* This was an aborted request. */
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
- spin_unlock(&request->state_lock);
+ /* use the task status set in the task struct by the
+ * isci_request_process_response_iu call.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = task->task_status.resp;
+ status = task->task_status.stat;
break;
- case aborting:
- /* aborting means that the task management function tried and
- * failed to abort the request. We need to note the request
- * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
- * target as down.
- *
- * Aborting also means an external thread is explicitly managing
- * this request, so that we do not complete it up the stack.
- */
+ case SCI_IO_SUCCESS:
+ case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
+ response = SAS_TASK_COMPLETE;
+ status = SAM_STAT_GOOD;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- response = SAS_TASK_UNDELIVERED;
- if (!idev)
- /* The device has been /is being stopped. Note that
- * we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_PHY_DOWN;
+ if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
- complete_to_host = isci_perform_aborted_io_completion;
+ /* This was an SSP / STP / SATA transfer.
+ * There is a possibility that less data than
+ * the maximum was transferred.
+ */
+ u32 transferred_length = sci_req_tx_bytes(request);
- /* This was an aborted request. */
+ task->task_status.residual
+ = task->total_xfer_len - transferred_length;
+
+ /* If there were residual bytes, call this an
+ * underrun.
+ */
+ if (task->task_status.residual != 0)
+ status = SAS_DATA_UNDERRUN;
- spin_unlock(&request->state_lock);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+ __func__, status);
+
+ } else
+ dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n",
+ __func__);
break;
- case terminating:
+ case SCI_IO_FAILURE_TERMINATED:
- /* This was an terminated request. This happens when
- * the I/O is being terminated because of an action on
- * the device (reset, tear down, etc.), and the I/O needs
- * to be completed up the stack.
- */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+ __func__, request, task);
+
+ /* The request was terminated explicitly. */
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
response = SAS_TASK_UNDELIVERED;
/* See if the device has been/is being stopped. Note
- * that we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
if (!idev)
status = SAS_DEVICE_UNKNOWN;
else
status = SAS_ABORTED_TASK;
-
- complete_to_host = isci_perform_aborted_io_completion;
-
- /* This was a terminated request. */
-
- spin_unlock(&request->state_lock);
break;
- case dead:
- /* This was a terminated request that timed-out during the
- * termination process. There is no task to complete to
- * libsas.
- */
- complete_to_host = isci_perform_normal_io_completion;
- spin_unlock(&request->state_lock);
- break;
+ case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
- default:
-
- /* The request is done from an SCU HW perspective. */
- request->status = completed;
-
- spin_unlock(&request->state_lock);
-
- /* This is an active request being completed from the core. */
- switch (completion_status) {
-
- case SCI_IO_FAILURE_RESPONSE_VALID:
- dev_dbg(&ihost->pdev->dev,
- "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
- __func__,
- request,
- task);
-
- if (sas_protocol_ata(task->task_proto)) {
- isci_process_stp_response(task, &request->stp.rsp);
- } else if (SAS_PROTOCOL_SSP == task->task_proto) {
-
- /* crack the iu response buffer. */
- resp_iu = &request->ssp.rsp;
- isci_request_process_response_iu(task, resp_iu,
- &ihost->pdev->dev);
-
- } else if (SAS_PROTOCOL_SMP == task->task_proto) {
-
- dev_err(&ihost->pdev->dev,
- "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
- "SAS_PROTOCOL_SMP protocol\n",
- __func__);
-
- } else
- dev_err(&ihost->pdev->dev,
- "%s: unknown protocol\n", __func__);
-
- /* use the task status set in the task struct by the
- * isci_request_process_response_iu call.
- */
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- response = task->task_status.resp;
- status = task->task_status.stat;
- break;
+ isci_request_handle_controller_specific_errors(idev, request,
+ task, &response,
+ &status);
+ break;
- case SCI_IO_SUCCESS:
- case SCI_IO_SUCCESS_IO_DONE_EARLY:
+ case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+ /* This is a special case, in that the I/O completion
+ * is telling us that the device needs a reset.
+ * In order for the device reset condition to be
+ * noticed, the I/O has to be handled in the error
+ * handler. Set the reset flag and cause the
+ * SCSI error thread to be scheduled.
+ */
+ spin_lock_irqsave(&task->task_state_lock, task_flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
- response = SAS_TASK_COMPLETE;
- status = SAM_STAT_GOOD;
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ /* Fail the I/O. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
- if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
- /* This was an SSP / STP / SATA transfer.
- * There is a possibility that less data than
- * the maximum was transferred.
- */
- u32 transferred_length = sci_req_tx_bytes(request);
+ case SCI_FAILURE_RETRY_REQUIRED:
- task->task_status.residual
- = task->total_xfer_len - transferred_length;
+ /* Fail the I/O so it can be retried. */
+ response = SAS_TASK_UNDELIVERED;
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
- /* If there were residual bytes, call this an
- * underrun.
- */
- if (task->task_status.residual != 0)
- status = SAS_DATA_UNDERRUN;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
- dev_dbg(&ihost->pdev->dev,
- "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
- __func__,
- status);
- } else
- dev_dbg(&ihost->pdev->dev,
- "%s: SCI_IO_SUCCESS\n",
- __func__);
+ default:
+ /* Catch any otherwise unhandled error codes here. */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: invalid completion code: 0x%x - "
+ "isci_request = %p\n",
+ __func__, completion_status, request);
- break;
+ response = SAS_TASK_UNDELIVERED;
- case SCI_IO_FAILURE_TERMINATED:
- dev_dbg(&ihost->pdev->dev,
- "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
- __func__,
- request,
- task);
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
- /* The request was terminated explicitly. No handling
- * is needed in the SCSI error handler path.
- */
+ if (SAS_PROTOCOL_SMP == task->task_proto)
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- response = SAS_TASK_UNDELIVERED;
-
- /* See if the device has been/is being stopped. Note
- * that we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
- if (!idev)
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_ABORTED_TASK;
-
- complete_to_host = isci_perform_normal_io_completion;
- break;
-
- case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
-
- isci_request_handle_controller_specific_errors(
- idev, request, task, &response, &status,
- &complete_to_host);
-
- break;
-
- case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
- /* This is a special case, in that the I/O completion
- * is telling us that the device needs a reset.
- * In order for the device reset condition to be
- * noticed, the I/O has to be handled in the error
- * handler. Set the reset flag and cause the
- * SCSI error thread to be scheduled.
- */
- spin_lock_irqsave(&task->task_state_lock, task_flags);
- task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
- spin_unlock_irqrestore(&task->task_state_lock, task_flags);
-
- /* Fail the I/O. */
- response = SAS_TASK_UNDELIVERED;
- status = SAM_STAT_TASK_ABORTED;
-
- complete_to_host = isci_perform_error_io_completion;
+ else
clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- break;
-
- case SCI_FAILURE_RETRY_REQUIRED:
-
- /* Fail the I/O so it can be retried. */
- response = SAS_TASK_UNDELIVERED;
- if (!idev)
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_ABORTED_TASK;
-
- complete_to_host = isci_perform_normal_io_completion;
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- break;
-
-
- default:
- /* Catch any otherwise unhandled error codes here. */
- dev_dbg(&ihost->pdev->dev,
- "%s: invalid completion code: 0x%x - "
- "isci_request = %p\n",
- __func__, completion_status, request);
-
- response = SAS_TASK_UNDELIVERED;
-
- /* See if the device has been/is being stopped. Note
- * that we ignore the quiesce state, since we are
- * concerned about the actual device state.
- */
- if (!idev)
- status = SAS_DEVICE_UNKNOWN;
- else
- status = SAS_ABORTED_TASK;
-
- if (SAS_PROTOCOL_SMP == task->task_proto) {
- set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- complete_to_host = isci_perform_normal_io_completion;
- } else {
- clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
- complete_to_host = isci_perform_error_io_completion;
- }
- break;
- }
break;
}
@@ -3038,10 +2938,18 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
break;
}
- /* Put the completed request on the correct list */
- isci_task_save_for_upper_layer_completion(ihost, request, response,
- status, complete_to_host
- );
+ spin_lock_irqsave(&task->task_state_lock, task_flags);
+
+ task->task_status.resp = response;
+ task->task_status.stat = status;
+
+ if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) {
+ /* Normal notification (task_done) */
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ }
+ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
/* complete the io request to the core. */
sci_controller_complete_io(ihost, request->target_device, request);
@@ -3051,6 +2959,8 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
* task to recognize the already completed case.
*/
set_bit(IREQ_TERMINATED, &request->flags);
+
+ ireq_done(ihost, request, task);
}
static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
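
With the per-request status enum and the requests_to_complete/errorback lists removed, completion writes the response and status directly into the sas_task under task_state_lock and marks it DONE only when the I/O is known to have finished in the target. The pattern, sketched with a pthread mutex standing in for task_state_lock:

/* Illustrative pattern for the final task-status update; the types and
 * flag values are stand-ins, not the libsas definitions. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define STATE_DONE	0x1
#define STATE_PENDING	0x2
#define AT_INITIATOR	0x4

struct task {
	pthread_mutex_t lock;
	unsigned state_flags;
	int resp, stat;
};

static void complete_task(struct task *t, int resp, int stat,
			  bool done_in_target)
{
	pthread_mutex_lock(&t->lock);
	t->resp = resp;
	t->stat = stat;
	if (done_in_target) {
		/* Normal notification: mark DONE, clear the pending bits. */
		t->state_flags |= STATE_DONE;
		t->state_flags &= ~(AT_INITIATOR | STATE_PENDING);
	}
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct task t = { .lock = PTHREAD_MUTEX_INITIALIZER,
			  .state_flags = STATE_PENDING | AT_INITIATOR };

	complete_task(&t, 0, 0, true);
	printf("flags: 0x%x\n", t.state_flags);
	return 0;
}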
@@ -3169,7 +3079,7 @@ sci_general_request_construct(struct isci_host *ihost,
sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
ireq->target_device = idev;
- ireq->protocol = SCIC_NO_PROTOCOL;
+ ireq->protocol = SAS_PROTOCOL_NONE;
ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
ireq->sci_status = SCI_SUCCESS;
@@ -3193,7 +3103,7 @@ sci_io_request_construct(struct isci_host *ihost,
if (dev->dev_type == SAS_END_DEV)
/* pass */;
- else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+ else if (dev_is_sata(dev))
memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
else if (dev_is_expander(dev))
/* pass */;
@@ -3215,10 +3125,15 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost,
/* Build the common part of the request */
sci_general_request_construct(ihost, idev, ireq);
- if (dev->dev_type == SAS_END_DEV ||
- dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) {
set_bit(IREQ_TMF, &ireq->flags);
memset(ireq->tc, 0, sizeof(struct scu_task_context));
+
+ /* Set the protocol indicator. */
+ if (dev_is_sata(dev))
+ ireq->protocol = SAS_PROTOCOL_STP;
+ else
+ ireq->protocol = SAS_PROTOCOL_SSP;
} else
status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
@@ -3311,7 +3226,7 @@ sci_io_request_construct_smp(struct device *dev,
if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
return SCI_FAILURE;
- ireq->protocol = SCIC_SMP_PROTOCOL;
+ ireq->protocol = SAS_PROTOCOL_SMP;
/* byte swap the smp request. */
@@ -3496,9 +3411,6 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t
ireq->io_request_completion = NULL;
ireq->flags = 0;
ireq->num_sg_entries = 0;
- INIT_LIST_HEAD(&ireq->completed_node);
- INIT_LIST_HEAD(&ireq->dev_node);
- isci_request_change_state(ireq, allocated);
return ireq;
}
@@ -3582,26 +3494,15 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
spin_unlock_irqrestore(&ihost->scic_lock, flags);
return status;
}
-
/* Either I/O started OK, or the core has signaled that
* the device needs a target reset.
- *
- * In either case, hold onto the I/O for later.
- *
- * Update it's status and add it to the list in the
- * remote device object.
*/
- list_add(&ireq->dev_node, &idev->reqs_in_process);
-
- if (status == SCI_SUCCESS) {
- isci_request_change_state(ireq, started);
- } else {
+ if (status != SCI_SUCCESS) {
/* The request did not really start in the
* hardware, so clear the request handle
* here so no terminations will be done.
*/
set_bit(IREQ_TERMINATED, &ireq->flags);
- isci_request_change_state(ireq, completed);
}
spin_unlock_irqrestore(&ihost->scic_lock, flags);
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 057f237..aff9531 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -61,30 +61,6 @@
#include "scu_task_context.h"
/**
- * struct isci_request_status - This enum defines the possible states of an I/O
- * request.
- *
- *
- */
-enum isci_request_status {
- unallocated = 0x00,
- allocated = 0x01,
- started = 0x02,
- completed = 0x03,
- aborting = 0x04,
- aborted = 0x05,
- terminating = 0x06,
- dead = 0x07
-};
-
-enum sci_request_protocol {
- SCIC_NO_PROTOCOL,
- SCIC_SMP_PROTOCOL,
- SCIC_SSP_PROTOCOL,
- SCIC_STP_PROTOCOL
-}; /* XXX remove me, use sas_task.{dev|task_proto} instead */;
-
-/**
* isci_stp_request - extra request infrastructure to handle pio/atapi protocol
* @pio_len - number of bytes requested at PIO setup
* @status - pio setup ending status value to tell us if we need
@@ -104,11 +80,14 @@ struct isci_stp_request {
};
struct isci_request {
- enum isci_request_status status;
#define IREQ_COMPLETE_IN_TARGET 0
#define IREQ_TERMINATED 1
#define IREQ_TMF 2
#define IREQ_ACTIVE 3
+ #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */
+ #define IREQ_TC_ABORT_POSTED 5
+ #define IREQ_ABORT_PATH_ACTIVE 6
+ #define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */
unsigned long flags;
/* XXX kill ttype and ttype_ptr, allocate full sas_task */
union ttype_ptr_union {
@@ -116,11 +95,6 @@ struct isci_request {
struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
} ttype_ptr;
struct isci_host *isci_host;
- /* For use in the requests_to_{complete|abort} lists: */
- struct list_head completed_node;
- /* For use in the reqs_in_process list: */
- struct list_head dev_node;
- spinlock_t state_lock;
dma_addr_t request_daddr;
dma_addr_t zero_scatter_daddr;
unsigned int num_sg_entries;
@@ -140,7 +114,7 @@ struct isci_request {
struct isci_host *owning_controller;
struct isci_remote_device *target_device;
u16 io_tag;
- enum sci_request_protocol protocol;
+ enum sas_protocol protocol;
u32 scu_status; /* hardware result */
u32 sci_status; /* upper layer disposition */
u32 post_context;
@@ -309,92 +283,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
return ireq->request_daddr + (requested_addr - base_addr);
}
-/**
- * isci_request_change_state() - This function sets the status of the request
- * object.
- * @request: This parameter points to the isci_request object
- * @status: This Parameter is the new status of the object
- *
- */
-static inline enum isci_request_status
-isci_request_change_state(struct isci_request *isci_request,
- enum isci_request_status status)
-{
- enum isci_request_status old_state;
- unsigned long flags;
-
- dev_dbg(&isci_request->isci_host->pdev->dev,
- "%s: isci_request = %p, state = 0x%x\n",
- __func__,
- isci_request,
- status);
-
- BUG_ON(isci_request == NULL);
-
- spin_lock_irqsave(&isci_request->state_lock, flags);
- old_state = isci_request->status;
- isci_request->status = status;
- spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
- return old_state;
-}
-
-/**
- * isci_request_change_started_to_newstate() - This function sets the status of
- * the request object.
- * @request: This parameter points to the isci_request object
- * @status: This Parameter is the new status of the object
- *
- * state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_newstate(struct isci_request *isci_request,
- struct completion *completion_ptr,
- enum isci_request_status newstate)
-{
- enum isci_request_status old_state;
- unsigned long flags;
-
- spin_lock_irqsave(&isci_request->state_lock, flags);
-
- old_state = isci_request->status;
-
- if (old_state == started || old_state == aborting) {
- BUG_ON(isci_request->io_request_completion != NULL);
-
- isci_request->io_request_completion = completion_ptr;
- isci_request->status = newstate;
- }
-
- spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
- dev_dbg(&isci_request->isci_host->pdev->dev,
- "%s: isci_request = %p, old_state = 0x%x\n",
- __func__,
- isci_request,
- old_state);
-
- return old_state;
-}
-
-/**
- * isci_request_change_started_to_aborted() - This function sets the status of
- * the request object.
- * @request: This parameter points to the isci_request object
- * @completion_ptr: This parameter is saved as the kernel completion structure
- * signalled when the old request completes.
- *
- * state previous to any change.
- */
-static inline enum isci_request_status
-isci_request_change_started_to_aborted(struct isci_request *isci_request,
- struct completion *completion_ptr)
-{
- return isci_request_change_started_to_newstate(isci_request,
- completion_ptr,
- aborted);
-}
-
#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
@@ -404,8 +292,6 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
u16 tag);
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
struct sas_task *task, u16 tag);
-void isci_terminate_pending_requests(struct isci_host *ihost,
- struct isci_remote_device *idev);
enum sci_status
sci_task_request_construct(struct isci_host *ihost,
struct isci_remote_device *idev,
@@ -421,5 +307,4 @@ static inline int isci_task_is_ncq_recovery(struct sas_task *task)
task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
}
-
#endif /* !defined(_ISCI_REQUEST_H_) */
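
request.h drops the isci_request_status and sci_request_protocol enums in favor of atomic flag bits and the generic sas_protocol values. A trivial illustration of the flag-bit style (bit names loosely mirror the new IREQ_* flags; they are not the kernel macros):

/* Toy illustration of tracking request state with flag bits instead of a
 * separate status enum. */
#include <stdio.h>

enum {
	F_COMPLETE_IN_TARGET = 1 << 0,
	F_TERMINATED	     = 1 << 1,
	F_TMF		     = 1 << 2,
	F_ACTIVE	     = 1 << 3,
	F_PENDING_ABORT	     = 1 << 4,	/* device not yet suspended */
};

int main(void)
{
	unsigned long flags = F_ACTIVE;

	flags |= F_PENDING_ABORT;	/* abort requested, RNC not suspended */
	if (flags & F_PENDING_ABORT)
		printf("wait for the remote device to suspend\n");
	flags &= ~F_PENDING_ABORT;	/* suspension completed */
	flags |= F_TERMINATED;
	return 0;
}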
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
index c8b329c..071cb74 100644
--- a/drivers/scsi/isci/scu_completion_codes.h
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -224,6 +224,7 @@
* 32-bit value like we want, each immediate value must be cast to a u32.
*/
#define SCU_TASK_DONE_GOOD ((u32)0x00)
+#define SCU_TASK_DONE_TX_RAW_CMD_ERR ((u32)0x08)
#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
@@ -237,6 +238,7 @@
#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
+#define SCU_TASK_DONE_BREAK_RCVD ((u32)0x1B)
#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 374254e..6bc74eb 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -78,54 +78,25 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
enum exec_status status)
{
- enum isci_completion_selection disposition;
+ unsigned long flags;
- disposition = isci_perform_normal_io_completion;
- disposition = isci_task_set_completion_status(task, response, status,
- disposition);
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n",
+ __func__, task, response, status);
- /* Tasks aborted specifically by a call to the lldd_abort_task
- * function should not be completed to the host in the regular path.
- */
- switch (disposition) {
- case isci_perform_normal_io_completion:
- /* Normal notification (task_done) */
- dev_dbg(&ihost->pdev->dev,
- "%s: Normal - task = %p, response=%d, "
- "status=%d\n",
- __func__, task, response, status);
-
- task->lldd_task = NULL;
- task->task_done(task);
- break;
-
- case isci_perform_aborted_io_completion:
- /*
- * No notification because this request is already in the
- * abort path.
- */
- dev_dbg(&ihost->pdev->dev,
- "%s: Aborted - task = %p, response=%d, "
- "status=%d\n",
- __func__, task, response, status);
- break;
+ spin_lock_irqsave(&task->task_state_lock, flags);
- case isci_perform_error_io_completion:
- /* Use sas_task_abort */
- dev_dbg(&ihost->pdev->dev,
- "%s: Error - task = %p, response=%d, "
- "status=%d\n",
- __func__, task, response, status);
- sas_task_abort(task);
- break;
+ task->task_status.resp = response;
+ task->task_status.stat = status;
- default:
- dev_dbg(&ihost->pdev->dev,
- "%s: isci task notification default case!",
- __func__);
- sas_task_abort(task);
- break;
- }
+ /* Normal notification (task_done) */
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ task->lldd_task = NULL;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ task->task_done(task);
}
#define for_each_sas_task(num, task) \
@@ -289,60 +260,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
return ireq;
}
-/**
-* isci_request_mark_zombie() - This function must be called with scic_lock held.
-*/
-static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq)
-{
- struct completion *tmf_completion = NULL;
- struct completion *req_completion;
-
- /* Set the request state to "dead". */
- ireq->status = dead;
-
- req_completion = ireq->io_request_completion;
- ireq->io_request_completion = NULL;
-
- if (test_bit(IREQ_TMF, &ireq->flags)) {
- /* Break links with the TMF request. */
- struct isci_tmf *tmf = isci_request_access_tmf(ireq);
-
- /* In the case where a task request is dying,
- * the thread waiting on the complete will sit and
- * timeout unless we wake it now. Since the TMF
- * has a default error status, complete it here
- * to wake the waiting thread.
- */
- if (tmf) {
- tmf_completion = tmf->complete;
- tmf->complete = NULL;
- }
- ireq->ttype_ptr.tmf_task_ptr = NULL;
- dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n",
- __func__, tmf->tmf_code, tmf->io_tag);
- } else {
- /* Break links with the sas_task - the callback is done
- * elsewhere.
- */
- struct sas_task *task = isci_request_access_task(ireq);
-
- if (task)
- task->lldd_task = NULL;
-
- ireq->ttype_ptr.io_task_ptr = NULL;
- }
-
- dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n",
- ireq->io_tag);
-
- /* Don't force waiting threads to timeout. */
- if (req_completion)
- complete(req_completion);
-
- if (tmf_completion != NULL)
- complete(tmf_completion);
-}
-
static int isci_task_execute_tmf(struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_tmf *tmf, unsigned long timeout_ms)
@@ -400,17 +317,11 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
spin_unlock_irqrestore(&ihost->scic_lock, flags);
goto err_tci;
}
-
- if (tmf->cb_state_func != NULL)
- tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
-
- isci_request_change_state(ireq, started);
-
- /* add the request to the remote device request list. */
- list_add(&ireq->dev_node, &idev->reqs_in_process);
-
spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ /* The RNC must be unsuspended before the TMF can get a response. */
+ isci_remote_device_resume_from_abort(ihost, idev);
+
/* Wait for the TMF to complete, or a timeout. */
timeleft = wait_for_completion_timeout(&completion,
msecs_to_jiffies(timeout_ms));
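
The TMF path above is now: post the request, resume the RNC from its abort suspension so the response can arrive, wait with a timeout, and on timeout suspend and terminate instead of the old zombie bookkeeping. The sketch below models only the timed wait, using a POSIX condition variable in place of the driver's completion:

/* Userspace sketch of "wait for the TMF to complete or time out". */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int tmf_done;

static int wait_for_tmf(unsigned timeout_ms)
{
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec  += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (!tmf_done && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &ts);
	pthread_mutex_unlock(&lock);
	return tmf_done;		/* 0 => timed out */
}

int main(void)
{
	/* The RNC is resumed from its abort suspension before waiting,
	 * otherwise the target's response could never be delivered. */
	if (!wait_for_tmf(100))
		printf("timeout: suspend the device and terminate the TMF\n");
	return 0;
}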
@@ -419,32 +330,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
/* The TMF did not complete - this could be because
* of an unplug. Terminate the TMF request now.
*/
- spin_lock_irqsave(&ihost->scic_lock, flags);
-
- if (tmf->cb_state_func != NULL)
- tmf->cb_state_func(isci_tmf_timed_out, tmf,
- tmf->cb_data);
-
- sci_controller_terminate_request(ihost, idev, ireq);
-
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- timeleft = wait_for_completion_timeout(
- &completion,
- msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
-
- if (!timeleft) {
- /* Strange condition - the termination of the TMF
- * request timed-out.
- */
- spin_lock_irqsave(&ihost->scic_lock, flags);
-
- /* If the TMF status has not changed, kill it. */
- if (tmf->status == SCI_FAILURE_TIMEOUT)
- isci_request_mark_zombie(ihost, ireq);
-
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
- }
+ isci_remote_device_suspend_terminate(ihost, idev, ireq);
}
isci_print_tmf(ihost, tmf);
@@ -476,315 +362,21 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
}
static void isci_task_build_tmf(struct isci_tmf *tmf,
- enum isci_tmf_function_codes code,
- void (*tmf_sent_cb)(enum isci_tmf_cb_state,
- struct isci_tmf *,
- void *),
- void *cb_data)
+ enum isci_tmf_function_codes code)
{
memset(tmf, 0, sizeof(*tmf));
-
- tmf->tmf_code = code;
- tmf->cb_state_func = tmf_sent_cb;
- tmf->cb_data = cb_data;
+ tmf->tmf_code = code;
}
static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
enum isci_tmf_function_codes code,
- void (*tmf_sent_cb)(enum isci_tmf_cb_state,
- struct isci_tmf *,
- void *),
struct isci_request *old_request)
{
- isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
+ isci_task_build_tmf(tmf, code);
tmf->io_tag = old_request->io_tag;
}
/**
- * isci_task_validate_request_to_abort() - This function checks the given I/O
- * against the "started" state. If the request is still "started", it's
- * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
- * BEFORE CALLING THIS FUNCTION.
- * @isci_request: This parameter specifies the request object to control.
- * @isci_host: This parameter specifies the ISCI host object
- * @isci_device: This is the device to which the request is pending.
- * @aborted_io_completion: This is a completion structure that will be added to
- * the request in case it is changed to aborting; this completion is
- * triggered when the request is fully completed.
- *
- * Either "started" on successful change of the task status to "aborted", or
- * "unallocated" if the task cannot be controlled.
- */
-static enum isci_request_status isci_task_validate_request_to_abort(
- struct isci_request *isci_request,
- struct isci_host *isci_host,
- struct isci_remote_device *isci_device,
- struct completion *aborted_io_completion)
-{
- enum isci_request_status old_state = unallocated;
-
- /* Only abort the task if it's in the
- * device's request_in_process list
- */
- if (isci_request && !list_empty(&isci_request->dev_node)) {
- old_state = isci_request_change_started_to_aborted(
- isci_request, aborted_io_completion);
-
- }
-
- return old_state;
-}
-
-static int isci_request_is_dealloc_managed(enum isci_request_status stat)
-{
- switch (stat) {
- case aborted:
- case aborting:
- case terminating:
- case completed:
- case dead:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * isci_terminate_request_core() - This function will terminate the given
- * request, and wait for it to complete. This function must only be called
- * from a thread that can wait. Note that the request is terminated and
- * completed (back to the host, if started there).
- * @ihost: This SCU.
- * @idev: The target.
- * @isci_request: The I/O request to be terminated.
- *
- */
-static void isci_terminate_request_core(struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *isci_request)
-{
- enum sci_status status = SCI_SUCCESS;
- bool was_terminated = false;
- bool needs_cleanup_handling = false;
- unsigned long flags;
- unsigned long termination_completed = 1;
- struct completion *io_request_completion;
-
- dev_dbg(&ihost->pdev->dev,
- "%s: device = %p; request = %p\n",
- __func__, idev, isci_request);
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
-
- io_request_completion = isci_request->io_request_completion;
-
- /* Note that we are not going to control
- * the target to abort the request.
- */
- set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
-
- /* Make sure the request wasn't just sitting around signalling
- * device condition (if the request handle is NULL, then the
- * request completed but needed additional handling here).
- */
- if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
- was_terminated = true;
- needs_cleanup_handling = true;
- status = sci_controller_terminate_request(ihost,
- idev,
- isci_request);
- }
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- /*
- * The only time the request to terminate will
- * fail is when the io request is completed and
- * being aborted.
- */
- if (status != SCI_SUCCESS) {
- dev_dbg(&ihost->pdev->dev,
- "%s: sci_controller_terminate_request"
- " returned = 0x%x\n",
- __func__, status);
-
- isci_request->io_request_completion = NULL;
-
- } else {
- if (was_terminated) {
- dev_dbg(&ihost->pdev->dev,
- "%s: before completion wait (%p/%p)\n",
- __func__, isci_request, io_request_completion);
-
- /* Wait here for the request to complete. */
- termination_completed
- = wait_for_completion_timeout(
- io_request_completion,
- msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC));
-
- if (!termination_completed) {
-
- /* The request to terminate has timed out. */
- spin_lock_irqsave(&ihost->scic_lock, flags);
-
- /* Check for state changes. */
- if (!test_bit(IREQ_TERMINATED,
- &isci_request->flags)) {
-
- /* The best we can do is to have the
- * request die a silent death if it
- * ever really completes.
- */
- isci_request_mark_zombie(ihost,
- isci_request);
- needs_cleanup_handling = true;
- } else
- termination_completed = 1;
-
- spin_unlock_irqrestore(&ihost->scic_lock,
- flags);
-
- if (!termination_completed) {
-
- dev_dbg(&ihost->pdev->dev,
- "%s: *** Timeout waiting for "
- "termination(%p/%p)\n",
- __func__, io_request_completion,
- isci_request);
-
- /* The request can no longer be referenced
- * safely since it may go away if the
- * termination every really does complete.
- */
- isci_request = NULL;
- }
- }
- if (termination_completed)
- dev_dbg(&ihost->pdev->dev,
- "%s: after completion wait (%p/%p)\n",
- __func__, isci_request, io_request_completion);
- }
-
- if (termination_completed) {
-
- isci_request->io_request_completion = NULL;
-
- /* Peek at the status of the request. This will tell
- * us if there was special handling on the request such that it
- * needs to be detached and freed here.
- */
- spin_lock_irqsave(&isci_request->state_lock, flags);
-
- needs_cleanup_handling
- = isci_request_is_dealloc_managed(
- isci_request->status);
-
- spin_unlock_irqrestore(&isci_request->state_lock, flags);
-
- }
- if (needs_cleanup_handling) {
-
- dev_dbg(&ihost->pdev->dev,
- "%s: cleanup isci_device=%p, request=%p\n",
- __func__, idev, isci_request);
-
- if (isci_request != NULL) {
- spin_lock_irqsave(&ihost->scic_lock, flags);
- isci_free_tag(ihost, isci_request->io_tag);
- isci_request_change_state(isci_request, unallocated);
- list_del_init(&isci_request->dev_node);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
- }
- }
- }
-}
-
-/**
- * isci_terminate_pending_requests() - This function will change the all of the
- * requests on the given device's state to "aborting", will terminate the
- * requests, and wait for them to complete. This function must only be
- * called from a thread that can wait. Note that the requests are all
- * terminated and completed (back to the host, if started there).
- * @isci_host: This parameter specifies SCU.
- * @idev: This parameter specifies the target.
- *
- */
-void isci_terminate_pending_requests(struct isci_host *ihost,
- struct isci_remote_device *idev)
-{
- struct completion request_completion;
- enum isci_request_status old_state;
- unsigned long flags;
- LIST_HEAD(list);
-
- spin_lock_irqsave(&ihost->scic_lock, flags);
- list_splice_init(&idev->reqs_in_process, &list);
-
- /* assumes that isci_terminate_request_core deletes from the list */
- while (!list_empty(&list)) {
- struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
-
- /* Change state to "terminating" if it is currently
- * "started".
- */
- old_state = isci_request_change_started_to_newstate(ireq,
- &request_completion,
- terminating);
- switch (old_state) {
- case started:
- case completed:
- case aborting:
- break;
- default:
- /* termination in progress, or otherwise dispositioned.
- * We know the request was on 'list' so should be safe
- * to move it back to reqs_in_process
- */
- list_move(&ireq->dev_node, &idev->reqs_in_process);
- ireq = NULL;
- break;
- }
-
- if (!ireq)
- continue;
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- init_completion(&request_completion);
-
- dev_dbg(&ihost->pdev->dev,
- "%s: idev=%p request=%p; task=%p old_state=%d\n",
- __func__, idev, ireq,
- (!test_bit(IREQ_TMF, &ireq->flags)
- ? isci_request_access_task(ireq)
- : NULL),
- old_state);
-
- /* If the old_state is started:
- * This request was not already being aborted. If it had been,
- * then the aborting I/O (ie. the TMF request) would not be in
- * the aborting state, and thus would be terminated here. Note
- * that since the TMF completion's call to the kernel function
- * "complete()" does not happen until the pending I/O request
- * terminate fully completes, we do not have to implement a
- * special wait here for already aborting requests - the
- * termination of the TMF request will force the request
- * to finish it's already started terminate.
- *
- * If old_state == completed:
- * This request completed from the SCU hardware perspective
- * and now just needs cleaning up in terms of freeing the
- * request and potentially calling up to libsas.
- *
- * If old_state == aborting:
- * This request has already gone through a TMF timeout, but may
- * not have been terminated; needs cleaning up at least.
- */
- isci_terminate_request_core(ihost, idev, ireq);
- spin_lock_irqsave(&ihost->scic_lock, flags);
- }
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-}
-
-/**
 * isci_task_send_lu_reset_sas() - This function is called by one of the SAS Domain
* Template functions.
* @lun: This parameter specifies the lun to be reset.
@@ -807,7 +399,7 @@ static int isci_task_send_lu_reset_sas(
 * value is "TMF_RESP_FUNC_COMPLETE", or the request timed out or
 * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
*/
- isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
+ isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset);
#define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
@@ -826,42 +418,44 @@ static int isci_task_send_lu_reset_sas(
int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
{
- struct isci_host *isci_host = dev_to_ihost(dev);
- struct isci_remote_device *isci_device;
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
unsigned long flags;
- int ret;
+ int ret = TMF_RESP_FUNC_COMPLETE;
- spin_lock_irqsave(&isci_host->scic_lock, flags);
- isci_device = isci_lookup_device(dev);
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_get_device(dev->lldd_dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
- dev_dbg(&isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
- __func__, dev, isci_host, isci_device);
+ __func__, dev, ihost, idev);
- if (!isci_device) {
- /* If the device is gone, stop the escalations. */
- dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__);
+ if (!idev) {
+ /* If the device is gone, escalate to I_T_Nexus_Reset. */
+ dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__);
- ret = TMF_RESP_FUNC_COMPLETE;
+ ret = TMF_RESP_FUNC_FAILED;
goto out;
}
- /* Send the task management part of the reset. */
- if (dev_is_sata(dev)) {
- sas_ata_schedule_reset(dev);
- ret = TMF_RESP_FUNC_COMPLETE;
- } else
- ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
-
- /* If the LUN reset worked, all the I/O can now be terminated. */
- if (ret == TMF_RESP_FUNC_COMPLETE)
- /* Terminate all I/O now. */
- isci_terminate_pending_requests(isci_host,
- isci_device);
-
+ /* Suspend the RNC, kill all TCs */
+ if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+ != SCI_SUCCESS) {
+ /* The suspend/terminate only fails if isci_get_device fails */
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
+ /* All pending I/Os have been terminated and cleaned up. */
+ if (!test_bit(IDEV_GONE, &idev->flags)) {
+ if (dev_is_sata(dev))
+ sas_ata_schedule_reset(dev);
+ else
+ /* Send the task management part of the reset. */
+ ret = isci_task_send_lu_reset_sas(ihost, idev, lun);
+ }
out:
- isci_put_device(isci_device);
+ isci_put_device(idev);
return ret;
}
@@ -882,63 +476,6 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
/* Task Management Functions. Must be called from process context. */
/**
- * isci_abort_task_process_cb() - This is a helper function for the abort task
- * TMF command. It manages the request state with respect to the successful
- * transmission / completion of the abort task request.
- * @cb_state: This parameter specifies when this function was called - after
- * the TMF request has been started and after it has timed-out.
- * @tmf: This parameter specifies the TMF in progress.
- *
- *
- */
-static void isci_abort_task_process_cb(
- enum isci_tmf_cb_state cb_state,
- struct isci_tmf *tmf,
- void *cb_data)
-{
- struct isci_request *old_request;
-
- old_request = (struct isci_request *)cb_data;
-
- dev_dbg(&old_request->isci_host->pdev->dev,
- "%s: tmf=%p, old_request=%p\n",
- __func__, tmf, old_request);
-
- switch (cb_state) {
-
- case isci_tmf_started:
- /* The TMF has been started. Nothing to do here, since the
- * request state was already set to "aborted" by the abort
- * task function.
- */
- if ((old_request->status != aborted)
- && (old_request->status != completed))
- dev_dbg(&old_request->isci_host->pdev->dev,
- "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
- __func__, old_request->status, tmf, old_request);
- break;
-
- case isci_tmf_timed_out:
-
- /* Set the task's state to "aborting", since the abort task
- * function thread set it to "aborted" (above) in anticipation
- * of the task management request working correctly. Since the
- * timeout has now fired, the TMF request failed. We set the
- * state such that the request completion will indicate the
- * device is no longer present.
- */
- isci_request_change_state(old_request, aborting);
- break;
-
- default:
- dev_dbg(&old_request->isci_host->pdev->dev,
- "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
- __func__, cb_state, tmf, old_request);
- break;
- }
-}
-
-/**
* isci_task_abort_task() - This function is one of the SAS Domain Template
* functions. This function is called by libsas to abort a specified task.
* @task: This parameter specifies the SAS task to abort.
@@ -947,22 +484,20 @@ static void isci_abort_task_process_cb(
*/
int isci_task_abort_task(struct sas_task *task)
{
- struct isci_host *isci_host = dev_to_ihost(task->dev);
+ struct isci_host *ihost = dev_to_ihost(task->dev);
DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
struct isci_request *old_request = NULL;
- enum isci_request_status old_state;
- struct isci_remote_device *isci_device = NULL;
+ struct isci_remote_device *idev = NULL;
struct isci_tmf tmf;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
- int perform_termination = 0;
/* Get the isci_request reference from the task. Note that
* this check does not depend on the pending request list
* in the device, because tasks driving resets may land here
* after completion in the core.
*/
- spin_lock_irqsave(&isci_host->scic_lock, flags);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
spin_lock(&task->task_state_lock);
old_request = task->lldd_task;
@@ -971,20 +506,29 @@ int isci_task_abort_task(struct sas_task *task)
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
(task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
old_request)
- isci_device = isci_lookup_device(task->dev);
+ idev = isci_get_device(task->dev->lldd_dev);
spin_unlock(&task->task_state_lock);
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
- dev_dbg(&isci_host->pdev->dev,
- "%s: dev = %p, task = %p, old_request == %p\n",
- __func__, isci_device, task, old_request);
+ dev_warn(&ihost->pdev->dev,
+ "%s: dev = %p (%s%s), task = %p, old_request == %p\n",
+ __func__, idev,
+ (dev_is_sata(task->dev) ? "STP/SATA"
+ : ((dev_is_expander(task->dev))
+ ? "SMP"
+ : "SSP")),
+ ((idev) ? ((test_bit(IDEV_GONE, &idev->flags))
+ ? " IDEV_GONE"
+ : "")
+ : " <NULL>"),
+ task, old_request);
/* Device reset conditions signalled in task_state_flags are the
	 * responsibility of libsas to observe at the start of the error
* handler thread.
*/
- if (!isci_device || !old_request) {
+ if (!idev || !old_request) {
/* The request has already completed and there
* is nothing to do here other than to set the task
* done bit, and indicate that the task abort function
@@ -998,108 +542,72 @@ int isci_task_abort_task(struct sas_task *task)
ret = TMF_RESP_FUNC_COMPLETE;
- dev_dbg(&isci_host->pdev->dev,
- "%s: abort task not needed for %p\n",
- __func__, task);
+ dev_warn(&ihost->pdev->dev,
+ "%s: abort task not needed for %p\n",
+ __func__, task);
goto out;
}
-
- spin_lock_irqsave(&isci_host->scic_lock, flags);
-
- /* Check the request status and change to "aborted" if currently
- * "starting"; if true then set the I/O kernel completion
- * struct that will be triggered when the request completes.
- */
- old_state = isci_task_validate_request_to_abort(
- old_request, isci_host, isci_device,
- &aborted_io_completion);
- if ((old_state != started) &&
- (old_state != completed) &&
- (old_state != aborting)) {
-
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
-
- /* The request was already being handled by someone else (because
- * they got to set the state away from started).
- */
- dev_dbg(&isci_host->pdev->dev,
- "%s: device = %p; old_request %p already being aborted\n",
- __func__,
- isci_device, old_request);
- ret = TMF_RESP_FUNC_COMPLETE;
+ /* Suspend the RNC, kill the TC */
+ if (isci_remote_device_suspend_terminate(ihost, idev, old_request)
+ != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+			 "%s: isci_remote_device_suspend_terminate(dev=%p, "
+ "req=%p, task=%p) failed\n",
+ __func__, idev, old_request, task);
+ ret = TMF_RESP_FUNC_FAILED;
goto out;
}
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
if (task->task_proto == SAS_PROTOCOL_SMP ||
sas_protocol_ata(task->task_proto) ||
- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
+ test_bit(IDEV_GONE, &idev->flags)) {
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
- dev_dbg(&isci_host->pdev->dev,
- "%s: %s request"
- " or complete_in_target (%d), thus no TMF\n",
- __func__,
- ((task->task_proto == SAS_PROTOCOL_SMP)
- ? "SMP"
- : (sas_protocol_ata(task->task_proto)
- ? "SATA/STP"
- : "<other>")
- ),
- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
-
- if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- ret = TMF_RESP_FUNC_COMPLETE;
- } else {
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- }
+ /* No task to send, so explicitly resume the device here */
+ isci_remote_device_resume_from_abort(ihost, idev);
- /* STP and SMP devices are not sent a TMF, but the
- * outstanding I/O request is terminated below. This is
- * because SATA/STP and SMP discovery path timeouts directly
- * call the abort task interface for cleanup.
- */
- perform_termination = 1;
+ dev_warn(&ihost->pdev->dev,
+ "%s: %s request"
+ " or complete_in_target (%d), "
+ "or IDEV_GONE (%d), thus no TMF\n",
+ __func__,
+ ((task->task_proto == SAS_PROTOCOL_SMP)
+ ? "SMP"
+ : (sas_protocol_ata(task->task_proto)
+ ? "SATA/STP"
+ : "<other>")
+ ),
+ test_bit(IREQ_COMPLETE_IN_TARGET,
+ &old_request->flags),
+ test_bit(IDEV_GONE, &idev->flags));
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
+ SAS_TASK_STATE_PENDING);
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ ret = TMF_RESP_FUNC_COMPLETE;
} else {
		/* Fill in the tmf structure */
isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
- isci_abort_task_process_cb,
old_request);
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ /* Send the task management request. */
#define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */
- ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+ ret = isci_task_execute_tmf(ihost, idev, &tmf,
ISCI_ABORT_TASK_TIMEOUT_MS);
-
- if (ret == TMF_RESP_FUNC_COMPLETE)
- perform_termination = 1;
- else
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_task_send_tmf failed\n", __func__);
}
- if (perform_termination) {
- set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
-
- /* Clean up the request on our side, and wait for the aborted
- * I/O to complete.
- */
- isci_terminate_request_core(isci_host, isci_device,
- old_request);
- }
-
- /* Make sure we do not leave a reference to aborted_io_completion */
- old_request->io_request_completion = NULL;
- out:
- isci_put_device(isci_device);
+out:
+ dev_warn(&ihost->pdev->dev,
+ "%s: Done; dev = %p, task = %p , old_request == %p\n",
+		 "%s: Done; dev = %p, task = %p, old_request == %p\n",
+ isci_put_device(idev);
return ret;
}
@@ -1195,14 +703,11 @@ isci_task_request_complete(struct isci_host *ihost,
{
struct isci_tmf *tmf = isci_request_access_tmf(ireq);
struct completion *tmf_complete = NULL;
- struct completion *request_complete = ireq->io_request_completion;
dev_dbg(&ihost->pdev->dev,
"%s: request = %p, status=%d\n",
__func__, ireq, completion_status);
- isci_request_change_state(ireq, completed);
-
set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
if (tmf) {
@@ -1226,20 +731,11 @@ isci_task_request_complete(struct isci_host *ihost,
*/
set_bit(IREQ_TERMINATED, &ireq->flags);
- /* As soon as something is in the terminate path, deallocation is
- * managed there. Note that the final non-managed state of a task
- * request is "completed".
- */
- if ((ireq->status == completed) ||
- !isci_request_is_dealloc_managed(ireq->status)) {
- isci_request_change_state(ireq, unallocated);
- isci_free_tag(ihost, ireq->io_tag);
- list_del_init(&ireq->dev_node);
- }
+ if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
+ wake_up_all(&ihost->eventq);
- /* "request_complete" is set if the task was being terminated. */
- if (request_complete)
- complete(request_complete);
+ if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags))
+ isci_free_tag(ihost, ireq->io_tag);
/* The task management part completes last. */
if (tmf_complete)
@@ -1250,48 +746,38 @@ static int isci_reset_device(struct isci_host *ihost,
struct domain_device *dev,
struct isci_remote_device *idev)
{
- int rc;
- unsigned long flags;
- enum sci_status status;
+ int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1;
struct sas_phy *phy = sas_get_local_phy(dev);
struct isci_port *iport = dev->port->lldd_port;
dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
- spin_lock_irqsave(&ihost->scic_lock, flags);
- status = sci_remote_device_reset(idev);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- if (status != SCI_SUCCESS) {
- dev_dbg(&ihost->pdev->dev,
- "%s: sci_remote_device_reset(%p) returned %d!\n",
- __func__, idev, status);
+ /* Suspend the RNC, terminate all outstanding TCs. */
+ if (isci_remote_device_suspend_terminate(ihost, idev, NULL)
+ != SCI_SUCCESS) {
rc = TMF_RESP_FUNC_FAILED;
goto out;
}
-
- if (scsi_is_sas_phy_local(phy)) {
- struct isci_phy *iphy = &ihost->phys[phy->number];
-
- rc = isci_port_perform_hard_reset(ihost, iport, iphy);
- } else
- rc = sas_phy_reset(phy, !dev_is_sata(dev));
-
- /* Terminate in-progress I/O now. */
- isci_remote_device_nuke_requests(ihost, idev);
-
- /* Since all pending TCs have been cleaned, resume the RNC. */
- spin_lock_irqsave(&ihost->scic_lock, flags);
- status = sci_remote_device_reset_complete(idev);
- spin_unlock_irqrestore(&ihost->scic_lock, flags);
-
- if (status != SCI_SUCCESS) {
- dev_dbg(&ihost->pdev->dev,
- "%s: sci_remote_device_reset_complete(%p) "
- "returned %d!\n", __func__, idev, status);
+	/* Note that since the termination of outstanding requests succeeded,
+	 * this function will return success.  This is because the resets will
+	 * only fail if the device has been removed (i.e. hotplug), and the
+	 * primary duty of this function is to clean up tasks, so that is the
+	 * relevant status.
+	 */
+ if (!test_bit(IDEV_GONE, &idev->flags)) {
+ if (scsi_is_sas_phy_local(phy)) {
+ struct isci_phy *iphy = &ihost->phys[phy->number];
+
+ reset_stat = isci_port_perform_hard_reset(ihost, iport,
+ iphy);
+ } else
+ reset_stat = sas_phy_reset(phy, !dev_is_sata(dev));
}
+ /* Explicitly resume the RNC here, since there was no task sent. */
+ isci_remote_device_resume_from_abort(ihost, idev);
- dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n",
+ __func__, idev, reset_stat);
out:
sas_put_local_phy(phy);
return rc;
@@ -1305,7 +791,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
int ret;
spin_lock_irqsave(&ihost->scic_lock, flags);
- idev = isci_lookup_device(dev);
+ idev = isci_get_device(dev->lldd_dev);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
if (!idev) {
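The reworked paths in task.c above all share one shape: suspend the remote node context, terminate the affected task contexts, optionally send a reset or TMF, then resume. A minimal sketch of that ordering, reusing the helper names from this patch; the wrapper function itself and its error handling are illustrative only, not driver code:

static int isci_eh_flow_sketch(struct isci_host *ihost,
			       struct isci_remote_device *idev,
			       struct isci_request *ireq)
{
	/* 1) Quiesce the RNC and terminate the targeted TC(s). */
	if (isci_remote_device_suspend_terminate(ihost, idev, ireq)
	    != SCI_SUCCESS)
		return TMF_RESP_FUNC_FAILED;

	/* 2) A departed device has nobody to send a TMF to; terminating
	 *    and cleaning up the requests was the whole job, so resume
	 *    and report success.
	 */
	if (test_bit(IDEV_GONE, &idev->flags)) {
		isci_remote_device_resume_from_abort(ihost, idev);
		return TMF_RESP_FUNC_COMPLETE;
	}

	/* 3) Otherwise send the TMF (or schedule a libata reset); the
	 *    TMF execution path takes care of resuming the RNC.
	 */
	return TMF_RESP_FUNC_COMPLETE;
}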
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 7b6d0e3..9c06cba 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -63,19 +63,6 @@
struct isci_request;
/**
- * enum isci_tmf_cb_state - This enum defines the possible states in which the
- * TMF callback function is invoked during the TMF execution process.
- *
- *
- */
-enum isci_tmf_cb_state {
-
- isci_tmf_init_state = 0,
- isci_tmf_started,
- isci_tmf_timed_out
-};
-
-/**
* enum isci_tmf_function_codes - This enum defines the possible preparations
* of task management requests.
*
@@ -87,6 +74,7 @@ enum isci_tmf_function_codes {
isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
isci_tmf_ssp_lun_reset = TMF_LU_RESET,
};
+
/**
* struct isci_tmf - This class represents the task management object which
* acts as an interface to libsas for processing task management requests
@@ -106,15 +94,6 @@ struct isci_tmf {
u16 io_tag;
enum isci_tmf_function_codes tmf_code;
int status;
-
- /* The optional callback function allows the user process to
- * track the TMF transmit / timeout conditions.
- */
- void (*cb_state_func)(
- enum isci_tmf_cb_state,
- struct isci_tmf *, void *);
- void *cb_data;
-
};
static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
@@ -208,113 +187,4 @@ int isci_queuecommand(
struct scsi_cmnd *scsi_cmd,
void (*donefunc)(struct scsi_cmnd *));
-/**
- * enum isci_completion_selection - This enum defines the possible actions to
- * take with respect to a given request's notification back to libsas.
- *
- *
- */
-enum isci_completion_selection {
-
- isci_perform_normal_io_completion, /* Normal notify (task_done) */
- isci_perform_aborted_io_completion, /* No notification. */
- isci_perform_error_io_completion /* Use sas_task_abort */
-};
-
-/**
- * isci_task_set_completion_status() - This function sets the completion status
- * for the request.
- * @task: This parameter is the completed request.
- * @response: This parameter is the response code for the completed task.
- * @status: This parameter is the status code for the completed task.
- *
-* @return The new notification mode for the request.
-*/
-static inline enum isci_completion_selection
-isci_task_set_completion_status(
- struct sas_task *task,
- enum service_response response,
- enum exec_status status,
- enum isci_completion_selection task_notification_selection)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&task->task_state_lock, flags);
-
- /* If a device reset is being indicated, make sure the I/O
- * is in the error path.
- */
- if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
- /* Fail the I/O to make sure it goes into the error path. */
- response = SAS_TASK_UNDELIVERED;
- status = SAM_STAT_TASK_ABORTED;
-
- task_notification_selection = isci_perform_error_io_completion;
- }
- task->task_status.resp = response;
- task->task_status.stat = status;
-
- switch (task->task_proto) {
-
- case SAS_PROTOCOL_SATA:
- case SAS_PROTOCOL_STP:
- case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-
- if (task_notification_selection
- == isci_perform_error_io_completion) {
- /* SATA/STP I/O has it's own means of scheduling device
- * error handling on the normal path.
- */
- task_notification_selection
- = isci_perform_normal_io_completion;
- }
- break;
- default:
- break;
- }
-
- switch (task_notification_selection) {
-
- case isci_perform_error_io_completion:
-
- if (task->task_proto == SAS_PROTOCOL_SMP) {
- /* There is no error escalation in the SMP case.
- * Convert to a normal completion to avoid the
- * timeout in the discovery path and to let the
- * next action take place quickly.
- */
- task_notification_selection
- = isci_perform_normal_io_completion;
-
- /* Fall through to the normal case... */
- } else {
- /* Use sas_task_abort */
- /* Leave SAS_TASK_STATE_DONE clear
- * Leave SAS_TASK_AT_INITIATOR set.
- */
- break;
- }
-
- case isci_perform_aborted_io_completion:
- /* This path can occur with task-managed requests as well as
- * requests terminated because of LUN or device resets.
- */
- /* Fall through to the normal case... */
- case isci_perform_normal_io_completion:
- /* Normal notification (task_done) */
- task->task_state_flags |= SAS_TASK_STATE_DONE;
- task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR |
- SAS_TASK_STATE_PENDING);
- break;
- default:
- WARN_ONCE(1, "unknown task_notification_selection: %d\n",
- task_notification_selection);
- break;
- }
-
- spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- return task_notification_selection;
-
-}
#endif /* !defined(_SCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
index 16f88ab..04a6d0d 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.c
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -57,31 +57,19 @@
#include "unsolicited_frame_control.h"
#include "registers.h"
-int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost)
{
struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
struct sci_unsolicited_frame *uf;
- u32 buf_len, header_len, i;
- dma_addr_t dma;
- size_t size;
- void *virt;
-
- /*
- * Prepare all of the memory sizes for the UF headers, UF address
- * table, and UF buffers themselves.
- */
- buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
- header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
- size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]);
+ dma_addr_t dma = ihost->ufi_dma;
+ void *virt = ihost->ufi_buf;
+ int i;
/*
* The Unsolicited Frame buffers are set at the start of the UF
* memory descriptor entry. The headers and address table will be
* placed after the buffers.
*/
- virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
- if (!virt)
- return -ENOMEM;
/*
* Program the location of the UF header table into the SCU.
@@ -93,8 +81,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
* headers, since we program the UF address table pointers to
* NULL.
*/
- uf_control->headers.physical_address = dma + buf_len;
- uf_control->headers.array = virt + buf_len;
+ uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE;
+ uf_control->headers.array = virt + SCI_UFI_BUF_SIZE;
/*
* Program the location of the UF address table into the SCU.
@@ -103,8 +91,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
* byte boundary already due to above programming headers being on a
	 * 64-bit boundary and the headers being 64 bytes in size.
*/
- uf_control->address_table.physical_address = dma + buf_len + header_len;
- uf_control->address_table.array = virt + buf_len + header_len;
+ uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
+ uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
uf_control->get = 0;
/*
@@ -135,8 +123,6 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
}
-
- return 0;
}
enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
index 75d8966..1bc551e 100644
--- a/drivers/scsi/isci/unsolicited_frame_control.h
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -257,9 +257,13 @@ struct sci_unsolicited_frame_control {
};
+#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE)
+#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header))
+#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64))
+
struct isci_host;
-int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+void sci_unsolicited_frame_control_construct(struct isci_host *ihost);
enum sci_status sci_unsolicited_frame_control_get_header(
struct sci_unsolicited_frame_control *uf_control,
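With sci_unsolicited_frame_control_construct() no longer allocating DMA memory itself, the host is expected to hand it one contiguous region of SCI_UFI_TOTAL_SIZE bytes (ihost->ufi_buf / ihost->ufi_dma above). A sketch of how that single allocation is carved up, using only the macros added here; the local variable names are illustrative:

	void *virt     = ihost->ufi_buf;	/* SCI_UFI_TOTAL_SIZE bytes */
	dma_addr_t dma = ihost->ufi_dma;

	/* region 1: unsolicited frame payload buffers */
	void *buffers             = virt;
	dma_addr_t buffers_dma    = dma;

	/* region 2: struct scu_unsolicited_frame_header array */
	void *headers             = virt + SCI_UFI_BUF_SIZE;
	dma_addr_t headers_dma    = dma + SCI_UFI_BUF_SIZE;

	/* region 3: u64 address table, one entry per frame */
	void *addr_table          = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;
	dma_addr_t addr_table_dma = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE;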
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index fb6610b..c1402fb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1742,17 +1742,19 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
mfs = ntohs(flp->fl_csp.sp_bb_data) &
FC_SP_BB_DATA_MASK;
- if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
- mfs <= lport->mfs) {
- lport->mfs = mfs;
- fc_host_maxframe_size(lport->host) = mfs;
- } else {
+
+ if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
"lport->mfs:%hu\n", mfs, lport->mfs);
fc_lport_error(lport, fp);
goto err;
}
+ if (mfs <= lport->mfs) {
+ lport->mfs = mfs;
+ fc_host_maxframe_size(lport->host) = mfs;
+ }
+
csp_flags = ntohs(flp->fl_csp.sp_features);
r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
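The FLOGI response handling above now separates validation from adoption: an mfs outside the FC-SP window is a protocol error that fails the login, while a valid mfs is adopted only if it lowers the local limit. The same rule as a small helper; the function name is illustrative, not part of libfc:

static int fc_lport_adopt_mfs(struct fc_lport *lport, unsigned int mfs)
{
	/* out-of-spec value: reject the FLOGI response */
	if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD)
		return -EINVAL;

	/* never grow beyond our own limit, only shrink to the peer's */
	if (mfs <= lport->mfs) {
		lport->mfs = mfs;
		fc_host_maxframe_size(lport->host) = mfs;
	}
	return 0;
}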
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index bc0cecc..441d88a 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -546,11 +546,12 @@ static struct ata_port_info sata_port_info = {
.port_ops = &sas_sata_ops
};
-int sas_ata_init_host_and_port(struct domain_device *found_dev)
+int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
struct Scsi_Host *shost = ha->core.shost;
struct ata_port *ap;
+ int rc;
ata_host_init(&found_dev->sata_dev.ata_host,
ha->dev,
@@ -567,8 +568,11 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev)
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
- /* publish initialized ata port */
- smp_wmb();
+ rc = ata_sas_port_init(ap);
+ if (rc) {
+ ata_sas_port_destroy(ap);
+ return rc;
+ }
found_dev->sata_dev.ap = ap;
return 0;
@@ -648,18 +652,13 @@ static void sas_get_ata_command_set(struct domain_device *dev)
void sas_probe_sata(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
- int err;
mutex_lock(&port->ha->disco_mutex);
- list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
+ list_for_each_entry(dev, &port->disco_list, disco_list_node) {
if (!dev_is_sata(dev))
continue;
- err = sas_ata_init_host_and_port(dev);
- if (err)
- sas_fail_probe(dev, __func__, err);
- else
- ata_sas_async_port_init(dev->sata_dev.ap);
+ ata_sas_async_probe(dev->sata_dev.ap);
}
mutex_unlock(&port->ha->disco_mutex);
@@ -718,18 +717,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
sas_put_device(dev);
}
-static bool sas_ata_dev_eh_valid(struct domain_device *dev)
-{
- struct ata_port *ap;
-
- if (!dev_is_sata(dev))
- return false;
- ap = dev->sata_dev.ap;
- /* consume fully initialized ata ports */
- smp_rmb();
- return !!ap;
-}
-
void sas_ata_strategy_handler(struct Scsi_Host *shost)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
@@ -753,7 +740,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
spin_lock(&port->dev_list_lock);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
- if (!sas_ata_dev_eh_valid(dev))
+ if (!dev_is_sata(dev))
continue;
async_schedule_domain(async_sas_ata_eh, dev, &async);
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 3646796..629a086 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -72,6 +72,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
struct asd_sas_phy *phy;
struct sas_rphy *rphy;
struct domain_device *dev;
+ int rc = -ENODEV;
dev = sas_alloc_device();
if (!dev)
@@ -110,9 +111,16 @@ static int sas_get_port_device(struct asd_sas_port *port)
sas_init_dev(dev);
+ dev->port = port;
switch (dev->dev_type) {
- case SAS_END_DEV:
case SATA_DEV:
+ rc = sas_ata_init(dev);
+ if (rc) {
+ rphy = NULL;
+ break;
+ }
+ /* fall through */
+ case SAS_END_DEV:
rphy = sas_end_device_alloc(port->port);
break;
case EDGE_DEV:
@@ -131,19 +139,14 @@ static int sas_get_port_device(struct asd_sas_port *port)
if (!rphy) {
sas_put_device(dev);
- return -ENODEV;
+ return rc;
}
- spin_lock_irq(&port->phy_list_lock);
- list_for_each_entry(phy, &port->phy_list, port_phy_el)
- sas_phy_set_target(phy, dev);
- spin_unlock_irq(&port->phy_list_lock);
rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
sas_fill_in_rphy(dev, rphy);
sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
port->port_dev = dev;
- dev->port = port;
dev->linkrate = port->linkrate;
dev->min_linkrate = port->linkrate;
dev->max_linkrate = port->linkrate;
@@ -155,6 +158,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
sas_device_set_phy(dev, port->port);
dev->rphy = rphy;
+ get_device(&dev->rphy->dev);
if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
list_add_tail(&dev->disco_list_node, &port->disco_list);
@@ -164,6 +168,11 @@ static int sas_get_port_device(struct asd_sas_port *port)
spin_unlock_irq(&port->dev_list_lock);
}
+ spin_lock_irq(&port->phy_list_lock);
+ list_for_each_entry(phy, &port->phy_list, port_phy_el)
+ sas_phy_set_target(phy, dev);
+ spin_unlock_irq(&port->phy_list_lock);
+
return 0;
}
@@ -205,8 +214,7 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
static void sas_probe_devices(struct work_struct *work)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev =
- container_of(work, struct sas_discovery_event, work);
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
clear_bit(DISCE_PROBE, &port->disc.pending);
@@ -255,6 +263,9 @@ void sas_free_device(struct kref *kref)
{
struct domain_device *dev = container_of(kref, typeof(*dev), kref);
+ put_device(&dev->rphy->dev);
+ dev->rphy = NULL;
+
if (dev->parent)
sas_put_device(dev->parent);
@@ -291,8 +302,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
static void sas_destruct_devices(struct work_struct *work)
{
struct domain_device *dev, *n;
- struct sas_discovery_event *ev =
- container_of(work, struct sas_discovery_event, work);
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
clear_bit(DISCE_DESTRUCT, &port->disc.pending);
@@ -302,7 +312,6 @@ static void sas_destruct_devices(struct work_struct *work)
sas_remove_children(&dev->rphy->dev);
sas_rphy_delete(dev->rphy);
- dev->rphy = NULL;
sas_unregister_common_dev(port, dev);
}
}
@@ -314,11 +323,11 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
/* this rphy never saw sas_rphy_add */
list_del_init(&dev->disco_list_node);
sas_rphy_free(dev->rphy);
- dev->rphy = NULL;
sas_unregister_common_dev(port, dev);
+ return;
}
- if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
+ if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
sas_discover_event(dev->port, DISCE_DESTRUCT);
@@ -377,8 +386,7 @@ static void sas_discover_domain(struct work_struct *work)
{
struct domain_device *dev;
int error = 0;
- struct sas_discovery_event *ev =
- container_of(work, struct sas_discovery_event, work);
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
@@ -419,8 +427,6 @@ static void sas_discover_domain(struct work_struct *work)
if (error) {
sas_rphy_free(dev->rphy);
- dev->rphy = NULL;
-
list_del_init(&dev->disco_list_node);
spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
@@ -437,8 +443,7 @@ static void sas_discover_domain(struct work_struct *work)
static void sas_revalidate_domain(struct work_struct *work)
{
int res = 0;
- struct sas_discovery_event *ev =
- container_of(work, struct sas_discovery_event, work);
+ struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
struct sas_ha_struct *ha = port->ha;
@@ -466,21 +471,25 @@ static void sas_revalidate_domain(struct work_struct *work)
/* ---------- Events ---------- */
-static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work)
+static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
- /* chained work is not subject to SA_HA_DRAINING or SAS_HA_REGISTERED */
- scsi_queue_work(ha->core.shost, work);
+	/* chained work is not subject to SAS_HA_DRAINING or
+	 * SAS_HA_REGISTERED, because it is either submitted from the
+	 * workqueue itself, or from a context that is known not to be
+	 * racing against draining.
+	 */
+ scsi_queue_work(ha->core.shost, &sw->work);
}
static void sas_chain_event(int event, unsigned long *pending,
- struct work_struct *work,
+ struct sas_work *sw,
struct sas_ha_struct *ha)
{
if (!test_and_set_bit(event, pending)) {
unsigned long flags;
spin_lock_irqsave(&ha->state_lock, flags);
- sas_chain_work(ha, work);
+ sas_chain_work(ha, sw);
spin_unlock_irqrestore(&ha->state_lock, flags);
}
}
@@ -519,7 +528,7 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
disc->pending = 0;
for (i = 0; i < DISC_NUM_EVENTS; i++) {
- INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
+ INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
disc->disc_work[i].port = port;
}
}
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 16639bb..4e4292d 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -27,19 +27,21 @@
#include "sas_internal.h"
#include "sas_dump.h"
-void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work)
+void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
return;
- if (test_bit(SAS_HA_DRAINING, &ha->state))
- list_add(&work->entry, &ha->defer_q);
- else
- scsi_queue_work(ha->core.shost, work);
+ if (test_bit(SAS_HA_DRAINING, &ha->state)) {
+ /* add it to the defer list, if not already pending */
+ if (list_empty(&sw->drain_node))
+ list_add(&sw->drain_node, &ha->defer_q);
+ } else
+ scsi_queue_work(ha->core.shost, &sw->work);
}
static void sas_queue_event(int event, unsigned long *pending,
- struct work_struct *work,
+ struct sas_work *work,
struct sas_ha_struct *ha)
{
if (!test_and_set_bit(event, pending)) {
@@ -55,7 +57,7 @@ static void sas_queue_event(int event, unsigned long *pending,
void __sas_drain_work(struct sas_ha_struct *ha)
{
struct workqueue_struct *wq = ha->core.shost->work_q;
- struct work_struct *w, *_w;
+ struct sas_work *sw, *_sw;
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
@@ -66,9 +68,9 @@ void __sas_drain_work(struct sas_ha_struct *ha)
spin_lock_irq(&ha->state_lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
- list_for_each_entry_safe(w, _w, &ha->defer_q, entry) {
- list_del_init(&w->entry);
- sas_queue_work(ha, w);
+ list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
+ list_del_init(&sw->drain_node);
+ sas_queue_work(ha, sw);
}
spin_unlock_irq(&ha->state_lock);
}
@@ -151,7 +153,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
int i;
for (i = 0; i < HA_NUM_EVENTS; i++) {
- INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
+ INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
sas_ha->ha_events[i].ha = sas_ha;
}
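Switching from a bare work_struct to sas_work gives every queued event a private drain_node list head, so a draining host can park deferred events on ha->defer_q without abusing the work_struct's own entry field. A sketch of a user of the new type; the event structure and handler are hypothetical, and INIT_SAS_WORK is assumed to initialize both the embedded work and drain_node:

struct example_event {
	struct sas_work work;	/* embeds a work_struct plus drain_node */
	struct sas_ha_struct *ha;
};

static void example_event_fn(struct work_struct *w)
{
	struct sas_work *sw = container_of(w, struct sas_work, work);
	struct example_event *ev = container_of(sw, struct example_event, work);

	/* ... handle the event for ev->ha ... */
}

	/* setup and queueing, given struct example_event *ev */
	INIT_SAS_WORK(&ev->work, example_event_fn);
	sas_queue_work(ev->ha, &ev->work);	/* parked on ha->defer_q while draining */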
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 05acd9e..caa0525 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -202,6 +202,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
u8 sas_addr[SAS_ADDR_SIZE];
struct smp_resp *resp = rsp;
struct discover_resp *dr = &resp->disc;
+ struct sas_ha_struct *ha = dev->port->ha;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
struct sas_rphy *rphy = dev->rphy;
@@ -209,6 +210,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
char *type;
if (new_phy) {
+ if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)))
+ return;
phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
/* FIXME: error_handling */
@@ -233,6 +236,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
phy->attached_dev_type = to_dev_type(dr);
+ if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
+ goto out;
phy->phy_id = phy_id;
phy->linkrate = dr->linkrate;
phy->attached_sata_host = dr->attached_sata_host;
@@ -240,7 +245,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->attached_sata_ps = dr->attached_sata_ps;
phy->attached_iproto = dr->iproto << 1;
phy->attached_tproto = dr->tproto << 1;
- memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
+ /* help some expanders that fail to zero sas_address in the 'no
+ * device' case
+ */
+ if (phy->attached_dev_type == NO_DEVICE ||
+ phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
+ memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
+ else
+ memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
phy->attached_phy_id = dr->attached_phy_id;
phy->phy_change_count = dr->change_count;
phy->routing_attr = dr->routing_attr;
@@ -266,6 +278,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
return;
}
+ out:
switch (phy->attached_dev_type) {
case SATA_PENDING:
type = "stp pending";
@@ -304,7 +317,15 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
else
return;
- SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
+ /* if the attached device type changed and ata_eh is active,
+ * make sure we run revalidation when eh completes (see:
+ * sas_enable_revalidation)
+ */
+ if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
+ set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending);
+
+ SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
+ test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
SAS_ADDR(dev->sas_addr), phy->phy_id,
sas_route_char(dev, phy), phy->linkrate,
SAS_ADDR(phy->attached_sas_addr), type);
@@ -776,13 +797,16 @@ static struct domain_device *sas_ex_discover_end_dev(
if (res)
goto out_free;
+ sas_init_dev(child);
+ res = sas_ata_init(child);
+ if (res)
+ goto out_free;
rphy = sas_end_device_alloc(phy->port);
- if (unlikely(!rphy))
+ if (!rphy)
goto out_free;
- sas_init_dev(child);
-
child->rphy = rphy;
+ get_device(&rphy->dev);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
@@ -806,6 +830,7 @@ static struct domain_device *sas_ex_discover_end_dev(
sas_init_dev(child);
child->rphy = rphy;
+ get_device(&rphy->dev);
sas_fill_in_rphy(child, rphy);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
@@ -830,8 +855,6 @@ static struct domain_device *sas_ex_discover_end_dev(
out_list_del:
sas_rphy_free(child->rphy);
- child->rphy = NULL;
-
list_del(&child->disco_list_node);
spin_lock_irq(&parent->port->dev_list_lock);
list_del(&child->dev_list_node);
@@ -911,6 +934,7 @@ static struct domain_device *sas_ex_discover_expander(
}
port = parent->port;
child->rphy = rphy;
+ get_device(&rphy->dev);
edev = rphy_to_expander_device(rphy);
child->dev_type = phy->attached_dev_type;
kref_get(&parent->kref);
@@ -934,6 +958,7 @@ static struct domain_device *sas_ex_discover_expander(
res = sas_discover_expander(child);
if (res) {
+ sas_rphy_delete(rphy);
spin_lock_irq(&parent->port->dev_list_lock);
list_del(&child->dev_list_node);
spin_unlock_irq(&parent->port->dev_list_lock);
@@ -1718,9 +1743,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
int phy_change_count = 0;
res = sas_get_phy_change_count(dev, i, &phy_change_count);
- if (res)
- goto out;
- else if (phy_change_count != ex->ex_phy[i].phy_change_count) {
+ switch (res) {
+ case SMP_RESP_PHY_VACANT:
+ case SMP_RESP_NO_PHY:
+ continue;
+ case SMP_RESP_FUNC_ACC:
+ break;
+ default:
+ return res;
+ }
+
+ if (phy_change_count != ex->ex_phy[i].phy_change_count) {
if (update)
ex->ex_phy[i].phy_change_count =
phy_change_count;
@@ -1728,8 +1761,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
return 0;
}
}
-out:
- return res;
+ return 0;
}
static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
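The sas_set_ex_phy() change above works around expanders that leave a stale attached SAS address in the DISCOVER response for phys with nothing attached: when the attached device type is NO_DEVICE, or the negotiated link rate is below 1.5 Gbps, the reported address is ignored and zeroed. The rule in isolation, as an illustrative helper:

static void ex_phy_copy_attached_addr(struct ex_phy *phy,
				      const struct discover_resp *dr)
{
	/* a dead phy cannot have a valid attached address, no matter
	 * what the expander put in the response
	 */
	if (phy->attached_dev_type == NO_DEVICE ||
	    phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
	else
		memcpy(phy->attached_sas_addr, dr->attached_sas_addr,
		       SAS_ADDR_SIZE);
}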
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 120bff6..10cb5ae 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -94,8 +94,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
void sas_hae_reset(struct work_struct *work)
{
- struct sas_ha_event *ev =
- container_of(work, struct sas_ha_event, work);
+ struct sas_ha_event *ev = to_sas_ha_event(work);
struct sas_ha_struct *ha = ev->ha;
clear_bit(HAE_RESET, &ha->pending);
@@ -369,14 +368,14 @@ static void sas_phy_release(struct sas_phy *phy)
static void phy_reset_work(struct work_struct *work)
{
- struct sas_phy_data *d = container_of(work, typeof(*d), reset_work);
+ struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);
d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
}
static void phy_enable_work(struct work_struct *work)
{
- struct sas_phy_data *d = container_of(work, typeof(*d), enable_work);
+ struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);
d->enable_result = sas_phy_enable(d->phy, d->enable);
}
@@ -389,8 +388,8 @@ static int sas_phy_setup(struct sas_phy *phy)
return -ENOMEM;
mutex_init(&d->event_lock);
- INIT_WORK(&d->reset_work, phy_reset_work);
- INIT_WORK(&d->enable_work, phy_enable_work);
+ INIT_SAS_WORK(&d->reset_work, phy_reset_work);
+ INIT_SAS_WORK(&d->enable_work, phy_enable_work);
d->phy = phy;
phy->hostdata = d;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index f05c638..507e4cf 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -45,10 +45,10 @@ struct sas_phy_data {
struct mutex event_lock;
int hard_reset;
int reset_result;
- struct work_struct reset_work;
+ struct sas_work reset_work;
int enable;
int enable_result;
- struct work_struct enable_work;
+ struct sas_work enable_work;
};
void sas_scsi_recover_host(struct Scsi_Host *shost);
@@ -80,7 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
void sas_porte_link_reset_err(struct work_struct *work);
void sas_porte_timer_event(struct work_struct *work);
void sas_porte_hard_reset(struct work_struct *work);
-void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work);
+void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
int sas_notify_lldd_dev_found(struct domain_device *);
void sas_notify_lldd_dev_gone(struct domain_device *);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index dcfd4a9..521422e 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -32,8 +32,7 @@
static void sas_phye_loss_of_signal(struct work_struct *work)
{
- struct asd_sas_event *ev =
- container_of(work, struct asd_sas_event, work);
+ struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
@@ -43,8 +42,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
static void sas_phye_oob_done(struct work_struct *work)
{
- struct asd_sas_event *ev =
- container_of(work, struct asd_sas_event, work);
+ struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
@@ -53,8 +51,7 @@ static void sas_phye_oob_done(struct work_struct *work)
static void sas_phye_oob_error(struct work_struct *work)
{
- struct asd_sas_event *ev =
- container_of(work, struct asd_sas_event, work);
+ struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
@@ -85,8 +82,7 @@ static void sas_phye_oob_error(struct work_struct *work)
static void sas_phye_spinup_hold(struct work_struct *work)
{
- struct asd_sas_event *ev =
- container_of(work, struct asd_sas_event, work);
+ struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
@@ -127,14 +123,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
phy->error = 0;
INIT_LIST_HEAD(&phy->port_phy_el);
for (k = 0; k < PORT_NUM_EVENTS; k++) {
- INIT_WORK(&phy->port_events[k].work,
- sas_port_event_fns[k]);
+ INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]);
phy->port_events[k].phy = phy;
}
for (k = 0; k < PHY_NUM_EVENTS; k++) {
- INIT_WORK(&phy->phy_events[k].work,
- sas_phy_event_fns[k]);
+ INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]);
phy->phy_events[k].phy = phy;
}
@@ -144,8 +138,7 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
spin_lock_init(&phy->sas_prim_lock);
phy->frame_rcvd_size = 0;
- phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev,
- i);
+ phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i);
if (!phy->phy)
return -ENOMEM;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index eb19c01..e884a8c 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -123,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
if (!port->port) {
- port->port = sas_port_alloc(phy->phy->dev.parent, phy->id);
+ port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
BUG_ON(!port->port);
sas_port_add(port->port);
}
@@ -208,8 +208,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
void sas_porte_bytes_dmaed(struct work_struct *work)
{
- struct asd_sas_event *ev =
- container_of(work, struct asd_sas_event, work);
+ struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
@@ -219,8 +218,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
void sas_porte_broadcast_rcvd(struct work_struct *work)
{
- struct asd_sas_event *ev =
- container_of(work, struct asd_sas_event, work);
+ struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
unsigned long flags;
u32 prim;
@@ -237,8 +235,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
void sas_porte_link_reset_err(struct work_struct *work)
{
- struct asd_sas_event *ev =
- container_of(work, struct asd_sas_event, work);
+ struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
@@ -248,8 +245,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
void sas_porte_timer_event(struct work_struct *work)
{
- struct asd_sas_event *ev =
- container_of(work, struct asd_sas_event, work);
+ struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
@@ -259,8 +255,7 @@ void sas_porte_timer_event(struct work_struct *work)
void sas_porte_hard_reset(struct work_struct *work)
{
- struct asd_sas_event *ev =
- container_of(work, struct asd_sas_event, work);
+ struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index f74cc06..bc3cc6d 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1367,6 +1367,9 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
+ if (ha->flags.isp82xx_reset_hdlr_active)
+ return -EBUSY;
+
rval = qla2x00_optrom_setup(bsg_job, vha, 0);
if (rval)
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 897731b..62324a1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,7 @@
* | Mailbox commands | 0x113e | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2086 | 0x2020-0x2022 |
- * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 |
+ * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401c | |
* | Async Events | 0x505d | 0x502b-0x502f |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index f79844c..ce42288 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1715,13 +1715,24 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
res = DID_ERROR << 16;
break;
}
- } else {
+ } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
+ lscsi_status != SAM_STAT_BUSY) {
+ /*
+			 * A SCSI status of TASK SET FULL or BUSY means the
+			 * command did not complete, so do not treat the
+			 * residual as dropped frames.
+ */
+
ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
"Dropped frame(s) detected (0x%x "
- "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
+ "of 0x%x bytes).\n", resid,
+ scsi_bufflen(cp));
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
+ } else {
+ ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
+ "scsi_status: 0x%x, lscsi_status: 0x%x\n",
+ scsi_status, lscsi_status);
}
res = DID_OK << 16 | lscsi_status;
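The underrun handling above now distinguishes three outcomes for a short transfer: a residual the firmware accounted for, a genuine dropped-frame error, and a TASK SET FULL or BUSY status where the target never ran the command and the short transfer is expected. Reduced to a sketch of the last two cases:

	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
	    lscsi_status == SAM_STAT_BUSY) {
		/* the target never completed the command; hand the SCSI
		 * status up and let the midlayer requeue it
		 */
		res = DID_OK << 16 | lscsi_status;
	} else {
		/* data really went missing on the wire */
		res = DID_ERROR << 16 | lscsi_status;
	}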
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index f052853..de722a9 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3125,6 +3125,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0x00b7,
"HW State: COLD/RE-INIT.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ qla82xx_set_rst_ready(ha);
if (ql2xmdenable) {
if (qla82xx_md_collect(vha))
ql_log(ql_log_warn, vha, 0xb02c,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a2f9992..7db8033 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3577,9 +3577,25 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
continue;
/* Attempt a retry. */
status = 1;
- } else
+ } else {
status = qla2x00_fabric_login(vha,
fcport, &next_loopid);
+ if (status == QLA_SUCCESS) {
+ int status2;
+ uint8_t opts;
+
+ opts = 0;
+ if (fcport->flags &
+ FCF_FCP2_DEVICE)
+ opts |= BIT_1;
+ status2 =
+ qla2x00_get_port_database(
+ vha, fcport,
+ opts);
+ if (status2 != QLA_SUCCESS)
+ status = 1;
+ }
+ }
} else
status = qla2x00_local_device_login(vha,
fcport);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3c13c0a..a683e76 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1017,6 +1017,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
return;
+ if (ha->flags.isp82xx_reset_hdlr_active)
+ return;
+
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
if (hdr.version == __constant_cpu_to_le16(0xffff))
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 29d780c..f5fdb16 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.13-k"
+#define QLA2XXX_VERSION "8.04.00.03-k"
#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 7
+#define QLA_DRIVER_MINOR_VER 4
+#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index dbe4392..62ddfd3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1638,7 +1638,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
request_fn_proc *request_fn)
{
struct request_queue *q;
- struct device *dev = shost->shost_gendev.parent;
+ struct device *dev = shost->dma_dev;
q = blk_init_queue(request_fn, NULL);
if (!q)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index efccd72..1b38431 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -175,7 +175,8 @@ static void virtscsi_complete_free(void *buf)
if (cmd->comp)
complete_all(cmd->comp);
- mempool_free(cmd, virtscsi_cmd_pool);
+ else
+ mempool_free(cmd, virtscsi_cmd_pool);
}
static void virtscsi_ctrl_done(struct virtqueue *vq)
@@ -311,21 +312,22 @@ out:
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
DECLARE_COMPLETION_ONSTACK(comp);
- int ret;
+ int ret = FAILED;
cmd->comp = &comp;
- ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
- sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
- GFP_NOIO);
- if (ret < 0)
- return FAILED;
+ if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
+ sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
+ GFP_NOIO) < 0)
+ goto out;
wait_for_completion(&comp);
- if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK &&
- cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
- return FAILED;
+ if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
+ cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
+ ret = SUCCESS;
- return SUCCESS;
+out:
+ mempool_free(cmd, virtscsi_cmd_pool);
+ return ret;
}
static int virtscsi_device_reset(struct scsi_cmnd *sc)
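The virtscsi_tmf() rework above makes the function the single owner of the command buffer: it is returned to the mempool on every exit path, after the completion callback has fired or the submission has failed. A sketch of that ownership pattern with hypothetical demo_* types (not the driver's real ones):

#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/errno.h>

struct demo_cmd {			/* mirrors only the fields used here */
	struct completion *comp;
	int response;
};

/* The completion callback is expected to do complete_all(cmd->comp). */
static int demo_tmf(mempool_t *pool, struct demo_cmd *cmd,
		    int (*kick)(struct demo_cmd *cmd))
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = -EIO;				/* pessimistic default */

	cmd->comp = &comp;
	if (kick(cmd) < 0)			/* submit failed: no callback coming */
		goto out;

	wait_for_completion(&comp);
	if (cmd->response == 0)
		ret = 0;
out:
	mempool_free(cmd, pool);		/* freed exactly once, on every path */
	return ret;
}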
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3ed7483..00c0240 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -74,7 +74,7 @@ config SPI_ATMEL
This selects a driver for the Atmel SPI Controller, present on
many AT32 (AVR32) and AT91 (ARM) chips.
-config SPI_BFIN
+config SPI_BFIN5XX
tristate "SPI controller driver for ADI Blackfin5xx"
depends on BLACKFIN
help
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index a1d48e0..9d75d21 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
-obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o
+obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index f01b264..7491971 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -1,7 +1,7 @@
/*
* Broadcom BCM63xx SPI controller support
*
- * Copyright (C) 2009-2011 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2009-2012 Florian Fainelli <florian@openwrt.org>
* Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
*
* This program is free software; you can redistribute it and/or
@@ -30,6 +30,8 @@
#include <linux/spi/spi.h>
#include <linux/completion.h>
#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
#include <bcm63xx_dev_spi.h>
@@ -37,8 +39,6 @@
#define DRV_VER "0.1.2"
struct bcm63xx_spi {
- spinlock_t lock;
- int stopping;
struct completion done;
void __iomem *regs;
@@ -96,17 +96,12 @@ static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
{ 391000, SPI_CLK_0_391MHZ }
};
-static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
+static int bcm63xx_spi_check_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
{
- struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
u8 bits_per_word;
- u8 clk_cfg, reg;
- u32 hz;
- int i;
bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
- hz = (t) ? t->speed_hz : spi->max_speed_hz;
if (bits_per_word != 8) {
dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
__func__, bits_per_word);
@@ -119,6 +114,19 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
return -EINVAL;
}
+ return 0;
+}
+
+static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ u32 hz;
+ u8 clk_cfg, reg;
+ int i;
+
+ hz = (t) ? t->speed_hz : spi->max_speed_hz;
+
/* Find the closest clock configuration */
for (i = 0; i < SPI_CLK_MASK; i++) {
if (hz <= bcm63xx_spi_freq_table[i][0]) {
@@ -139,8 +147,6 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
clk_cfg, hz);
-
- return 0;
}
/* the spi->mode bits understood by this driver: */
@@ -153,9 +159,6 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
bs = spi_master_get_devdata(spi->master);
- if (bs->stopping)
- return -ESHUTDOWN;
-
if (!spi->bits_per_word)
spi->bits_per_word = 8;
@@ -165,7 +168,7 @@ static int bcm63xx_spi_setup(struct spi_device *spi)
return -EINVAL;
}
- ret = bcm63xx_spi_setup_transfer(spi, NULL);
+ ret = bcm63xx_spi_check_transfer(spi, NULL);
if (ret < 0) {
dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
spi->mode & ~MODEBITS);
@@ -190,28 +193,29 @@ static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs)
bs->remaining_bytes -= size;
}
-static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi,
+ struct spi_transfer *t)
{
struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
u16 msg_ctl;
u16 cmd;
+ /* Disable the CMD_DONE interrupt */
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
t->tx_buf, t->rx_buf, t->len);
/* Transmitter is inhibited */
bs->tx_ptr = t->tx_buf;
bs->rx_ptr = t->rx_buf;
- init_completion(&bs->done);
if (t->tx_buf) {
bs->remaining_bytes = t->len;
bcm63xx_spi_fill_tx_fifo(bs);
}
- /* Enable the command done interrupt which
- * we use to determine completion of a command */
- bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
+ init_completion(&bs->done);
/* Fill in the Message control register */
msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
@@ -230,33 +234,76 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
bcm_spi_writew(bs, cmd, SPI_CMD);
- wait_for_completion(&bs->done);
- /* Disable the CMD_DONE interrupt */
- bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+ /* Enable the CMD_DONE interrupt */
+ bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
return t->len - bs->remaining_bytes;
}
-static int bcm63xx_transfer(struct spi_device *spi, struct spi_message *m)
+static int bcm63xx_spi_prepare_transfer(struct spi_master *master)
{
- struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
- struct spi_transfer *t;
- int ret = 0;
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
- if (unlikely(list_empty(&m->transfers)))
- return -EINVAL;
+ pm_runtime_get_sync(&bs->pdev->dev);
- if (bs->stopping)
- return -ESHUTDOWN;
+ return 0;
+}
+
+static int bcm63xx_spi_unprepare_transfer(struct spi_master *master)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+ pm_runtime_put(&bs->pdev->dev);
+
+ return 0;
+}
+
+static int bcm63xx_spi_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ struct spi_device *spi = m->spi;
+ int status = 0;
+ unsigned int timeout = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
- ret += bcm63xx_txrx_bufs(spi, t);
- }
+ unsigned int len = t->len;
+ u8 rx_tail;
- m->complete(m->context);
+ status = bcm63xx_spi_check_transfer(spi, t);
+ if (status < 0)
+ goto exit;
- return ret;
+ /* configure adapter for a new transfer */
+ bcm63xx_spi_setup_transfer(spi, t);
+
+ while (len) {
+ /* send the data */
+ len -= bcm63xx_txrx_bufs(spi, t);
+
+ timeout = wait_for_completion_timeout(&bs->done, HZ);
+ if (!timeout) {
+ status = -ETIMEDOUT;
+ goto exit;
+ }
+
+ /* read out all data */
+ rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
+
+ /* Read out all the data */
+ if (rx_tail)
+ memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
+ }
+
+ m->actual_length += t->len;
+ }
+exit:
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
}
/* This driver supports single master mode only. Hence
@@ -267,39 +314,15 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
struct spi_master *master = (struct spi_master *)dev_id;
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
u8 intr;
- u16 cmd;
/* Read interrupts and clear them immediately */
intr = bcm_spi_readb(bs, SPI_INT_STATUS);
bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
- /* A tansfer completed */
- if (intr & SPI_INTR_CMD_DONE) {
- u8 rx_tail;
-
- rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
-
- /* Read out all the data */
- if (rx_tail)
- memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
-
- /* See if there is more data to send */
- if (bs->remaining_bytes > 0) {
- bcm63xx_spi_fill_tx_fifo(bs);
-
- /* Start the transfer */
- bcm_spi_writew(bs, SPI_HD_W << SPI_MSG_TYPE_SHIFT,
- SPI_MSG_CTL);
- cmd = bcm_spi_readw(bs, SPI_CMD);
- cmd |= SPI_CMD_START_IMMEDIATE;
- cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
- bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
- bcm_spi_writew(bs, cmd, SPI_CMD);
- } else {
- complete(&bs->done);
- }
- }
+ /* A transfer completed */
+ if (intr & SPI_INTR_CMD_DONE)
+ complete(&bs->done);
return IRQ_HANDLED;
}
@@ -345,7 +368,6 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
}
bs = spi_master_get_devdata(master);
- init_completion(&bs->done);
platform_set_drvdata(pdev, master);
bs->pdev = pdev;
@@ -379,12 +401,13 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
master->setup = bcm63xx_spi_setup;
- master->transfer = bcm63xx_transfer;
+ master->prepare_transfer_hardware = bcm63xx_spi_prepare_transfer;
+ master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer;
+ master->transfer_one_message = bcm63xx_spi_transfer_one;
+ master->mode_bits = MODEBITS;
bs->speed_hz = pdata->speed_hz;
- bs->stopping = 0;
bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
- spin_lock_init(&bs->lock);
/* Initialize hardware */
clk_enable(bs->clk);
@@ -418,18 +441,16 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ spi_unregister_master(master);
+
/* reset spi block */
bcm_spi_writeb(bs, 0, SPI_INT_MASK);
- spin_lock(&bs->lock);
- bs->stopping = 1;
/* HW shutdown */
clk_disable(bs->clk);
clk_put(bs->clk);
- spin_unlock(&bs->lock);
platform_set_drvdata(pdev, 0);
- spi_unregister_master(master);
return 0;
}
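Taken together, the spi-bcm63xx hunks convert the driver to the SPI core message queue: runtime PM in the prepare/unprepare hooks, and a transfer_one_message() implementation that starts each transfer, waits with a timeout, and always finalizes the message. A condensed sketch of that shape, with hypothetical demo_* names and the hardware programming elided; the hooks would be wired to master->prepare_transfer_hardware, ->unprepare_transfer_hardware and ->transfer_one_message as the probe hunk does:

#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_spi {			/* hypothetical driver state */
	struct platform_device *pdev;
	struct completion done;		/* signalled from the IRQ handler */
};

static int demo_prepare_transfer(struct spi_master *master)
{
	struct demo_spi *bs = spi_master_get_devdata(master);

	pm_runtime_get_sync(&bs->pdev->dev);	/* power up for the message */
	return 0;
}

static int demo_unprepare_transfer(struct spi_master *master)
{
	struct demo_spi *bs = spi_master_get_devdata(master);

	pm_runtime_put(&bs->pdev->dev);
	return 0;
}

static int demo_transfer_one(struct spi_master *master, struct spi_message *m)
{
	struct demo_spi *bs = spi_master_get_devdata(master);
	struct spi_transfer *t;
	int status = 0;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		/* ... start the hardware for this transfer here ... */
		if (!wait_for_completion_timeout(&bs->done, HZ)) {
			status = -ETIMEDOUT;
			break;
		}
		m->actual_length += t->len;
	}

	m->status = status;
	spi_finalize_current_message(master);	/* always hand the message back */
	return 0;
}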
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 248a2cc..1fe5119 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -252,19 +252,15 @@ static void
bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
{
struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
- unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15);
bfin_sport_spi_disable(drv_data);
dev_dbg(drv_data->dev, "restoring spi ctl state\n");
bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
- bfin_write(&drv_data->regs->tcr2, bits);
bfin_write(&drv_data->regs->tclkdiv, chip->baud);
- bfin_write(&drv_data->regs->tfsdiv, bits);
SSYNC();
bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
- bfin_write(&drv_data->regs->rcr2, bits);
SSYNC();
bfin_sport_spi_cs_active(chip);
@@ -420,11 +416,15 @@ bfin_sport_spi_pump_transfers(unsigned long data)
drv_data->cs_change = transfer->cs_change;
/* Bits per word setup */
- bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word;
- if (bits_per_word == 8)
- drv_data->ops = &bfin_sport_transfer_ops_u8;
- else
+ bits_per_word = transfer->bits_per_word ? :
+ message->spi->bits_per_word ? : 8;
+ if (bits_per_word % 16 == 0)
drv_data->ops = &bfin_sport_transfer_ops_u16;
+ else
+ drv_data->ops = &bfin_sport_transfer_ops_u8;
+ bfin_write(&drv_data->regs->tcr2, bits_per_word - 1);
+ bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1);
+ bfin_write(&drv_data->regs->rcr2, bits_per_word - 1);
drv_data->state = RUNNING_STATE;
@@ -598,11 +598,12 @@ bfin_sport_spi_setup(struct spi_device *spi)
}
chip->cs_chg_udelay = chip_info->cs_chg_udelay;
chip->idle_tx_val = chip_info->idle_tx_val;
- spi->bits_per_word = chip_info->bits_per_word;
}
}
- if (spi->bits_per_word != 8 && spi->bits_per_word != 16) {
+ if (spi->bits_per_word % 8) {
+ dev_err(&spi->dev, "%d bits_per_word is not supported\n",
+ spi->bits_per_word);
ret = -EINVAL;
goto error;
}
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 3b83ff8..9bb4d4a 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -396,7 +396,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
/* last read */
if (drv_data->rx) {
dev_dbg(&drv_data->pdev->dev, "last read\n");
- if (n_bytes % 2) {
+ if (!(n_bytes % 2)) {
u16 *buf = (u16 *)drv_data->rx;
for (loop = 0; loop < n_bytes / 2; loop++)
*buf++ = bfin_read(&drv_data->regs->rdbr);
@@ -424,7 +424,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
if (drv_data->rx && drv_data->tx) {
/* duplex */
dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
- if (n_bytes % 2) {
+ if (!(n_bytes % 2)) {
u16 *buf = (u16 *)drv_data->rx;
u16 *buf2 = (u16 *)drv_data->tx;
for (loop = 0; loop < n_bytes / 2; loop++) {
@@ -442,7 +442,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
} else if (drv_data->rx) {
/* read */
dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
- if (n_bytes % 2) {
+ if (!(n_bytes % 2)) {
u16 *buf = (u16 *)drv_data->rx;
for (loop = 0; loop < n_bytes / 2; loop++) {
*buf++ = bfin_read(&drv_data->regs->rdbr);
@@ -458,7 +458,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
} else if (drv_data->tx) {
/* write */
dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
- if (n_bytes % 2) {
+ if (!(n_bytes % 2)) {
u16 *buf = (u16 *)drv_data->tx;
for (loop = 0; loop < n_bytes / 2; loop++) {
bfin_read(&drv_data->regs->rdbr);
@@ -587,6 +587,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
if (message->state == DONE_STATE) {
dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n");
message->status = 0;
+ bfin_spi_flush(drv_data);
bfin_spi_giveback(drv_data);
return;
}
@@ -870,8 +871,10 @@ static void bfin_spi_pump_transfers(unsigned long data)
message->actual_length += drv_data->len_in_bytes;
/* Move to next transfer of this msg */
message->state = bfin_spi_next_transfer(drv_data);
- if (drv_data->cs_change)
+ if (drv_data->cs_change && message->state != DONE_STATE) {
+ bfin_spi_flush(drv_data);
bfin_spi_cs_deactive(drv_data, chip);
+ }
}
/* Schedule next transfer tasklet */
@@ -1026,7 +1029,6 @@ static int bfin_spi_setup(struct spi_device *spi)
chip->cs_chg_udelay = chip_info->cs_chg_udelay;
chip->idle_tx_val = chip_info->idle_tx_val;
chip->pio_interrupt = chip_info->pio_interrupt;
- spi->bits_per_word = chip_info->bits_per_word;
} else {
/* force a default base state */
chip->ctl_reg &= bfin_ctl_reg;
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 6db2887..e805507 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -545,13 +545,12 @@ static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
* in case of failure.
*/
static struct dma_async_tx_descriptor *
-ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
+ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
{
struct spi_transfer *t = espi->current_msg->state;
struct dma_async_tx_descriptor *txd;
enum dma_slave_buswidth buswidth;
struct dma_slave_config conf;
- enum dma_transfer_direction slave_dirn;
struct scatterlist *sg;
struct sg_table *sgt;
struct dma_chan *chan;
@@ -567,14 +566,13 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
memset(&conf, 0, sizeof(conf));
conf.direction = dir;
- if (dir == DMA_FROM_DEVICE) {
+ if (dir == DMA_DEV_TO_MEM) {
chan = espi->dma_rx;
buf = t->rx_buf;
sgt = &espi->rx_sgt;
conf.src_addr = espi->sspdr_phys;
conf.src_addr_width = buswidth;
- slave_dirn = DMA_DEV_TO_MEM;
} else {
chan = espi->dma_tx;
buf = t->tx_buf;
@@ -582,7 +580,6 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
conf.dst_addr = espi->sspdr_phys;
conf.dst_addr_width = buswidth;
- slave_dirn = DMA_MEM_TO_DEV;
}
ret = dmaengine_slave_config(chan, &conf);
@@ -633,8 +630,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
if (!nents)
return ERR_PTR(-ENOMEM);
- txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents,
- slave_dirn, DMA_CTRL_ACK);
+ txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
if (!txd) {
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
return ERR_PTR(-ENOMEM);
@@ -651,12 +647,12 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
* unmapped.
*/
static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
- enum dma_data_direction dir)
+ enum dma_transfer_direction dir)
{
struct dma_chan *chan;
struct sg_table *sgt;
- if (dir == DMA_FROM_DEVICE) {
+ if (dir == DMA_DEV_TO_MEM) {
chan = espi->dma_rx;
sgt = &espi->rx_sgt;
} else {
@@ -677,16 +673,16 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
struct spi_message *msg = espi->current_msg;
struct dma_async_tx_descriptor *rxd, *txd;
- rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
+ rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
if (IS_ERR(rxd)) {
dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
msg->status = PTR_ERR(rxd);
return;
}
- txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
+ txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
if (IS_ERR(txd)) {
- ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+ ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
msg->status = PTR_ERR(txd);
return;
@@ -705,8 +701,8 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
wait_for_completion(&espi->wait);
- ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
- ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+ ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
+ ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
}
/**
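The ep93xx hunks drop the translation from enum dma_data_direction and feed the same enum dma_transfer_direction value to both the slave config and the descriptor preparation. A sketch of that single-direction-type pattern; demo_prep() and the 1-byte bus width are illustrative:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *
demo_prep(struct dma_chan *chan, struct sg_table *sgt, unsigned int nents,
	  dma_addr_t fifo_addr, enum dma_transfer_direction dir)
{
	struct dma_slave_config conf = { .direction = dir };

	if (dir == DMA_DEV_TO_MEM) {		/* rx: device FIFO -> memory */
		conf.src_addr = fifo_addr;
		conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else {				/* DMA_MEM_TO_DEV: tx path */
		conf.dst_addr = fifo_addr;
		conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	}

	if (dmaengine_slave_config(chan, &conf))
		return NULL;

	/* The very same direction value is passed to the prep call. */
	return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
}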
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 09c925a..400ae21 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1667,9 +1667,15 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
/* cpsdvsr = 254 & scr = 255 */
min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
- if (!((freq <= max_tclk) && (freq >= min_tclk))) {
+ if (freq > max_tclk)
+ dev_warn(&pl022->adev->dev,
+ "Max speed that can be programmed is %d Hz, you requested %d\n",
+ max_tclk, freq);
+
+ if (freq < min_tclk) {
dev_err(&pl022->adev->dev,
- "controller data is incorrect: out of range frequency");
+ "Requested frequency: %d Hz is less than minimum possible %d Hz\n",
+ freq, min_tclk);
return -EINVAL;
}
@@ -1681,26 +1687,37 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
while (scr <= SCR_MAX) {
tmp = spi_rate(rate, cpsdvsr, scr);
- if (tmp > freq)
+ if (tmp > freq) {
+ /* we need lower freq */
scr++;
+ continue;
+ }
+
/*
- * If found exact value, update and break.
- * If found more closer value, update and continue.
+ * If the exact value is found, mark it found and break.
+ * If a closer value is found, update and break.
*/
- else if ((tmp == freq) || (tmp > best_freq)) {
+ if (tmp > best_freq) {
best_freq = tmp;
best_cpsdvsr = cpsdvsr;
best_scr = scr;
if (tmp == freq)
- break;
+ found = 1;
}
- scr++;
+ /*
+ * increased scr will give lower rates, which are not
+ * required
+ */
+ break;
}
cpsdvsr += 2;
scr = SCR_MIN;
}
+ WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate\n",
+ freq);
+
clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
clk_freq->scr = (u8) (best_scr & 0xFF);
dev_dbg(&pl022->adev->dev,
@@ -1823,9 +1840,12 @@ static int pl022_setup(struct spi_device *spi)
} else
chip->cs_control = chip_info->cs_control;
- if (bits <= 3) {
- /* PL022 doesn't support less than 4-bits */
+ /* Check bits per word with vendor specific range */
+ if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
status = -ENOTSUPP;
+ dev_err(&spi->dev, "illegal data size for this controller!\n");
+ dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
+ pl022->vendor->max_bpw);
goto err_config_params;
} else if (bits <= 8) {
dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
@@ -1838,20 +1858,10 @@ static int pl022_setup(struct spi_device *spi)
chip->read = READING_U16;
chip->write = WRITING_U16;
} else {
- if (pl022->vendor->max_bpw >= 32) {
- dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
- chip->n_bytes = 4;
- chip->read = READING_U32;
- chip->write = WRITING_U32;
- } else {
- dev_err(&spi->dev,
- "illegal data size for this controller!\n");
- dev_err(&spi->dev,
- "a standard pl022 can only handle "
- "1 <= n <= 16 bit words\n");
- status = -ENOTSUPP;
- goto err_config_params;
- }
+ dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
+ chip->n_bytes = 4;
+ chip->read = READING_U32;
+ chip->write = WRITING_U32;
}
/* Now Initialize all register settings required for this chip */
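The rewritten calculate_effective_freq() loop above steps even cpsdvsr values (2..254) and scr values (0..255) looking for the highest rate that does not exceed the request, abandoning each scr scan as soon as the rate falls to or below the target. A standalone sketch of the same search in an equivalent nested-loop form, using the usual PL022 formula f = clk / (cpsdvsr * (1 + scr)); the 48 MHz clock and 1 MHz request are example numbers:

#include <stdio.h>

#define CPSDVR_MIN 2u
#define CPSDVR_MAX 254u
#define SCR_MIN    0u
#define SCR_MAX    255u

static unsigned int spi_rate(unsigned int rate, unsigned int cpsdvsr,
			     unsigned int scr)
{
	return rate / (cpsdvsr * (1 + scr));
}

int main(void)
{
	unsigned int rate = 48000000, freq = 1000000;	/* example clock/request */
	unsigned int best_freq = 0, best_cpsdvsr = 0, best_scr = 0;

	for (unsigned int cpsdvsr = CPSDVR_MIN; cpsdvsr <= CPSDVR_MAX; cpsdvsr += 2) {
		for (unsigned int scr = SCR_MIN; scr <= SCR_MAX; scr++) {
			unsigned int tmp = spi_rate(rate, cpsdvsr, scr);

			if (tmp > freq)		/* still too fast, raise scr */
				continue;
			if (tmp > best_freq) {	/* closest rate not above freq */
				best_freq = tmp;
				best_cpsdvsr = cpsdvsr;
				best_scr = scr;
			}
			break;	/* a larger scr only lowers the rate further */
		}
		if (best_freq == freq)
			break;	/* exact match, nothing better can exist */
	}

	printf("best %u Hz with cpsdvsr=%u scr=%u\n",
	       best_freq, best_cpsdvsr, best_scr);
	return 0;
}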
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 400df8c..d91751f 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -36,6 +36,7 @@
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
+#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 56d74dc..91a97b3 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -32,6 +32,7 @@
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
+#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 9112cd8..60cba81 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -31,6 +31,7 @@
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
+#include <linux/interrupt.h>
#include <net/dst.h>
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index 2b45d3d..04cd57f 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -383,8 +383,6 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
pd->tx_pool = &f->link;
pd->tx_pool_count++;
f = 0;
- } else {
- kfree(f);
}
spin_unlock_bh(&pd->tx_frame_lock);
if (f)
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 7862513..9cf29fc 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -79,10 +79,6 @@
#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
-#define OMAP343X_CTRL_REGADDR(reg) \
- OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
-
-
/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
@@ -418,19 +414,27 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
/* Assert RST1 i.e only the RST only for DSP megacell */
if (!status) {
+ /*
+ * XXX: ioremapping MUST be removed once ctrl
+ * function is made available.
+ */
+ void __iomem *ctrl = ioremap(OMAP343X_CTRL_BASE, SZ_4K);
+ if (!ctrl)
+ return -ENOMEM;
+
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
OMAP2_RM_RSTCTRL);
/* Mask address with 1K for compatibility */
__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
- OMAP343X_CTRL_REGADDR(
- OMAP343X_CONTROL_IVA2_BOOTADDR));
+ ctrl + OMAP343X_CONTROL_IVA2_BOOTADDR);
/*
* Set bootmode to self loop if dsp_debug flag is true
*/
__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
- OMAP343X_CTRL_REGADDR(
- OMAP343X_CONTROL_IVA2_BOOTMOD));
+ ctrl + OMAP343X_CONTROL_IVA2_BOOTMOD);
+
+ iounmap(ctrl);
}
}
if (!status) {
diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c
index 70055c8..870f934 100644
--- a/drivers/staging/tidspbridge/core/wdt.c
+++ b/drivers/staging/tidspbridge/core/wdt.c
@@ -53,7 +53,10 @@ int dsp_wdt_init(void)
int ret = 0;
dsp_wdt.sm_wdt = NULL;
- dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE);
+ dsp_wdt.reg_base = ioremap(OMAP34XX_WDT3_BASE, SZ_4K);
+ if (!dsp_wdt.reg_base)
+ return -ENOMEM;
+
tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);
dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
@@ -99,6 +102,9 @@ void dsp_wdt_exit(void)
dsp_wdt.fclk = NULL;
dsp_wdt.iclk = NULL;
dsp_wdt.sm_wdt = NULL;
+
+ if (dsp_wdt.reg_base)
+ iounmap(dsp_wdt.reg_base);
dsp_wdt.reg_base = NULL;
}
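Both tidspbridge hunks replace a static virtual-address macro with a runtime ioremap() of the physical base, checked for failure and released with iounmap(). A minimal sketch of that pattern; DEMO_REG_BASE and DEMO_REG_OFFSET are placeholders, not real OMAP addresses:

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_REG_BASE	0x48000000UL	/* placeholder physical base */
#define DEMO_REG_OFFSET	0x10		/* placeholder register offset */

static int demo_poke_register(u32 val)
{
	void __iomem *base = ioremap(DEMO_REG_BASE, 4096);	/* one page */

	if (!base)
		return -ENOMEM;

	__raw_writel(val, base + DEMO_REG_OFFSET);
	iounmap(base);				/* drop the mapping when done */
	return 0;
}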
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
index 3ed2c8f..7048e01 100644
--- a/drivers/staging/zcache/Kconfig
+++ b/drivers/staging/zcache/Kconfig
@@ -2,7 +2,7 @@ config ZCACHE
bool "Dynamic compression of swap pages and clean pagecache pages"
# X86 dependency is because zsmalloc uses non-portable pte/tlb
# functions
- depends on (CLEANCACHE || FRONTSWAP) && CRYPTO && X86
+ depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && X86
select ZSMALLOC
select CRYPTO_LZO
default n
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 70c3ffb..e320ec2 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -60,7 +60,6 @@ static void core_clear_initiator_node_from_tpg(
int i;
struct se_dev_entry *deve;
struct se_lun *lun;
- struct se_lun_acl *acl, *acl_tmp;
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -81,28 +80,7 @@ static void core_clear_initiator_node_from_tpg(
core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
- spin_lock(&lun->lun_acl_lock);
- list_for_each_entry_safe(acl, acl_tmp,
- &lun->lun_acl_list, lacl_list) {
- if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
- (acl->mapped_lun == deve->mapped_lun))
- break;
- }
-
- if (!acl) {
- pr_err("Unable to locate struct se_lun_acl for %s,"
- " mapped_lun: %u\n", nacl->initiatorname,
- deve->mapped_lun);
- spin_unlock(&lun->lun_acl_lock);
- spin_lock_irq(&nacl->device_list_lock);
- continue;
- }
-
- list_del(&acl->lacl_list);
- spin_unlock(&lun->lun_acl_lock);
-
spin_lock_irq(&nacl->device_list_lock);
- kfree(acl);
}
spin_unlock_irq(&nacl->device_list_lock);
}
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 24145c3..6cc4358 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1073,8 +1073,10 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
(new_serial.close_delay != port->close_delay) ||
(new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
((new_serial.flags & ~ASYNC_USR_MASK) !=
- (port->flags & ~ASYNC_USR_MASK)))
+ (port->flags & ~ASYNC_USR_MASK))) {
+ tty_unlock();
return -EPERM;
+ }
port->flags = ((port->flags & ~ASYNC_USR_MASK) |
(new_serial.flags & ASYNC_USR_MASK));
state->custom_divisor = new_serial.custom_divisor;
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index e6c3dbd..836fe273 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -154,10 +154,9 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
port->x_char = 0;
return IRQ_HANDLED;
}
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- clps711xuart_stop_tx(port);
- return IRQ_HANDLED;
- }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ goto disable_tx_irq;
count = port->fifosize >> 1;
do {
@@ -171,8 +170,11 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit))
- clps711xuart_stop_tx(port);
+ if (uart_circ_empty(xmit)) {
+ disable_tx_irq:
+ disable_irq_nosync(TX_IRQ(port));
+ tx_enabled(port) = 0;
+ }
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index bbbec4a..c2816f4 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1447,9 +1447,11 @@ static int pch_uart_verify_port(struct uart_port *port,
__func__);
return -EOPNOTSUPP;
#endif
- priv->use_dma = 1;
priv->use_dma_flag = 1;
dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n");
+ if (!priv->use_dma)
+ pch_request_dma(port);
+ priv->use_dma = 1;
}
return 0;
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 08ebe90..654755a 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -469,7 +469,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
tty = NULL;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
if (!ZS_IS_OPEN(uap_a)) {
- pmz_debug("ChanA interrupt while open !\n");
+ pmz_debug("ChanA interrupt while not open !\n");
goto skip_a;
}
write_zsreg(uap_a, R0, RES_H_IUS);
@@ -493,8 +493,8 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
spin_lock(&uap_b->port.lock);
tty = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
- if (!ZS_IS_OPEN(uap_a)) {
- pmz_debug("ChanB interrupt while open !\n");
+ if (!ZS_IS_OPEN(uap_b)) {
+ pmz_debug("ChanB interrupt while not open !\n");
goto skip_b;
}
write_zsreg(uap_b, R0, RES_H_IUS);
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index 86dd1e3..29ca20d 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -1085,15 +1085,21 @@ void vt_set_led_state(int console, int leds)
*
* Handle console start. This is a wrapper for the VT layer
* so that we can keep kbd knowledge internal
+ *
+ * FIXME: We eventually need to hold the kbd lock here to protect
+ * the LED updating. We can't do it yet because fn_hold calls stop_tty
+ * and start_tty under the kbd_event_lock, while normal tty paths
+ * don't hold the lock. We probably need to split out an LED lock
+ * but not during an -rc release!
*/
void vt_kbd_con_start(int console)
{
struct kbd_struct * kbd = kbd_table + console;
- unsigned long flags;
- spin_lock_irqsave(&kbd_event_lock, flags);
+/* unsigned long flags; */
+/* spin_lock_irqsave(&kbd_event_lock, flags); */
clr_vc_kbd_led(kbd, VC_SCROLLOCK);
set_leds();
- spin_unlock_irqrestore(&kbd_event_lock, flags);
+/* spin_unlock_irqrestore(&kbd_event_lock, flags); */
}
/**
@@ -1102,22 +1108,28 @@ void vt_kbd_con_start(int console)
*
* Handle console stop. This is a wrapper for the VT layer
* so that we can keep kbd knowledge internal
+ *
+ * FIXME: We eventually need to hold the kbd lock here to protect
+ * the LED updating. We can't do it yet because fn_hold calls stop_tty
+ * and start_tty under the kbd_event_lock, while normal tty paths
+ * don't hold the lock. We probably need to split out an LED lock
+ * but not during an -rc release!
*/
void vt_kbd_con_stop(int console)
{
struct kbd_struct * kbd = kbd_table + console;
- unsigned long flags;
- spin_lock_irqsave(&kbd_event_lock, flags);
+/* unsigned long flags; */
+/* spin_lock_irqsave(&kbd_event_lock, flags); */
set_vc_kbd_led(kbd, VC_SCROLLOCK);
set_leds();
- spin_unlock_irqrestore(&kbd_event_lock, flags);
+/* spin_unlock_irqrestore(&kbd_event_lock, flags); */
}
/*
* This is the tasklet that updates LED state on all keyboards
* attached to the box. The reason we use tasklet is that we
* need to handle the scenario when keyboard handler is not
- * registered yet but we already getting updates form VT to
+ * registered yet but we are already getting updates from the VT to
* update led state.
*/
static void kbd_bh(unsigned long dummy)
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c6f6560..0bb2b32 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -157,8 +157,9 @@ static void wdm_out_callback(struct urb *urb)
spin_lock(&desc->iuspin);
desc->werr = urb->status;
spin_unlock(&desc->iuspin);
- clear_bit(WDM_IN_USE, &desc->flags);
kfree(desc->outbuf);
+ desc->outbuf = NULL;
+ clear_bit(WDM_IN_USE, &desc->flags);
wake_up(&desc->wait);
}
@@ -338,7 +339,7 @@ static ssize_t wdm_write
if (we < 0)
return -EIO;
- desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
+ buf = kmalloc(count, GFP_KERNEL);
if (!buf) {
rv = -ENOMEM;
goto outnl;
@@ -406,10 +407,12 @@ static ssize_t wdm_write
req->wIndex = desc->inum;
req->wLength = cpu_to_le16(count);
set_bit(WDM_IN_USE, &desc->flags);
+ desc->outbuf = buf;
rv = usb_submit_urb(desc->command, GFP_KERNEL);
if (rv < 0) {
kfree(buf);
+ desc->outbuf = NULL;
clear_bit(WDM_IN_USE, &desc->flags);
dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
} else {
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 622b4a4..57ed9e4 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,6 +493,15 @@ static int hcd_pci_suspend_noirq(struct device *dev)
pci_save_state(pci_dev);
+ /*
+ * Some systems crash if an EHCI controller is in D3 during
+ * a sleep transition. We have to leave such controllers in D0.
+ */
+ if (hcd->broken_pci_sleep) {
+ dev_dbg(dev, "Staying in PCI D0\n");
+ return retval;
+ }
+
/* If the root hub is dead rather than suspended, disallow remote
* wakeup. usb_hc_died() should ensure that both hosts are marked as
* dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a2aa9d6..ec6c97d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1667,7 +1667,6 @@ void usb_disconnect(struct usb_device **pdev)
{
struct usb_device *udev = *pdev;
int i;
- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
/* mark the device as inactive, so any further urb submissions for
* this device (and any of its children) will fail immediately.
@@ -1690,9 +1689,7 @@ void usb_disconnect(struct usb_device **pdev)
* so that the hardware is now fully quiesced.
*/
dev_dbg (&udev->dev, "unregistering device\n");
- mutex_lock(hcd->bandwidth_mutex);
usb_disable_device(udev, 0);
- mutex_unlock(hcd->bandwidth_mutex);
usb_hcd_synchronize_unlinks(udev);
usb_remove_ep_devs(&udev->ep0);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index aed3e07..ca717da 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1136,8 +1136,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
* Deallocates hcd/hardware state for the endpoints (nuking all or most
* pending urbs) and usbcore state for the interfaces, so that usbcore
* must usb_set_configuration() before any interfaces could be used.
- *
- * Must be called with hcd->bandwidth_mutex held.
*/
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
@@ -1190,7 +1188,9 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
usb_disable_endpoint(dev, i + USB_DIR_IN, false);
}
/* Remove endpoints from the host controller internal state */
+ mutex_lock(hcd->bandwidth_mutex);
usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+ mutex_unlock(hcd->bandwidth_mutex);
/* Second pass: remove endpoint pointers */
}
for (i = skip_ep0; i < 16; ++i) {
@@ -1750,7 +1750,6 @@ free_interfaces:
/* if it's already configured, clear out old state first.
* getting rid of old interfaces means unbinding their drivers.
*/
- mutex_lock(hcd->bandwidth_mutex);
if (dev->state != USB_STATE_ADDRESS)
usb_disable_device(dev, 1); /* Skip ep0 */
@@ -1763,6 +1762,7 @@ free_interfaces:
* host controller will not allow submissions to dropped endpoints. If
* this call fails, the device state is unchanged.
*/
+ mutex_lock(hcd->bandwidth_mutex);
ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
if (ret < 0) {
mutex_unlock(hcd->bandwidth_mutex);
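The message.c hunks move the bandwidth_mutex from the callers into usb_disable_device(), so the lock is held only around the bandwidth release itself. A bare sketch of that scope change, with an illustrative lock and helper:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_bandwidth_mutex);	/* illustrative lock */

static void demo_disable_device(void)
{
	/* ... endpoint teardown that does not need the lock ... */

	mutex_lock(&demo_bandwidth_mutex);
	/* release controller bandwidth state for the dropped endpoints */
	mutex_unlock(&demo_bandwidth_mutex);

	/* ... remaining cleanup, again lock-free ... */
}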
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 7bd815a..99b58d84 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -206,11 +206,11 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
for (i = 0; i < dwc->num_event_buffers; i++) {
evt = dwc->ev_buffs[i];
- if (evt) {
+ if (evt)
dwc3_free_one_event_buffer(dwc, evt);
- dwc->ev_buffs[i] = NULL;
- }
}
+
+ kfree(dwc->ev_buffs);
}
/**
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 25910e2..3584a16 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -353,6 +353,9 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
dwc->test_mode_nr = wIndex >> 8;
dwc->test_mode = true;
+ break;
+ default:
+ return -EINVAL;
}
break;
@@ -559,15 +562,20 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
length = trb->size & DWC3_TRB_SIZE_MASK;
if (dwc->ep0_bounced) {
+ unsigned transfer_size = ur->length;
+ unsigned maxp = ep0->endpoint.maxpacket;
+
+ transfer_size += (maxp - (transfer_size % maxp));
transferred = min_t(u32, ur->length,
- ep0->endpoint.maxpacket - length);
+ transfer_size - length);
memcpy(ur->buf, dwc->ep0_bounce, transferred);
dwc->ep0_bounced = false;
} else {
transferred = ur->length - length;
- ur->actual += transferred;
}
+ ur->actual += transferred;
+
if ((epnum & 1) && ur->actual < ur->length) {
/* for some reason we did not get everything out */
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 0c935d7..9d7bcd9 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1863,8 +1863,8 @@ static int __devinit at91udc_probe(struct platform_device *pdev)
mod_timer(&udc->vbus_timer,
jiffies + VBUS_POLL_TIMEOUT);
} else {
- if (request_irq(udc->board.vbus_pin, at91_vbus_irq,
- 0, driver_name, udc)) {
+ if (request_irq(gpio_to_irq(udc->board.vbus_pin),
+ at91_vbus_irq, 0, driver_name, udc)) {
DBG("request vbus irq %d failed\n",
udc->board.vbus_pin);
retval = -EBUSY;
@@ -1886,7 +1886,7 @@ static int __devinit at91udc_probe(struct platform_device *pdev)
return 0;
fail4:
if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled)
- free_irq(udc->board.vbus_pin, udc);
+ free_irq(gpio_to_irq(udc->board.vbus_pin), udc);
fail3:
if (gpio_is_valid(udc->board.vbus_pin))
gpio_free(udc->board.vbus_pin);
@@ -1924,7 +1924,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
remove_debug_file(udc);
if (gpio_is_valid(udc->board.vbus_pin)) {
- free_irq(udc->board.vbus_pin, udc);
+ free_irq(gpio_to_irq(udc->board.vbus_pin), udc);
gpio_free(udc->board.vbus_pin);
}
free_irq(udc->udp_irq, udc);
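The at91_udc hunks fix a unit mix-up: a GPIO number is not an IRQ number, so it has to be translated with gpio_to_irq() before request_irq() and again before free_irq(). A short sketch of that pairing; the demo_* names are illustrative:

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

static irqreturn_t demo_vbus_irq(int irq, void *dev_id)
{
	/* ... handle the VBUS change ... */
	return IRQ_HANDLED;
}

static int demo_request_vbus(int vbus_gpio, void *dev)
{
	if (!gpio_is_valid(vbus_gpio))
		return -EINVAL;

	/* The GPIO number must be mapped to an IRQ number first. */
	return request_irq(gpio_to_irq(vbus_gpio), demo_vbus_irq, 0,
			   "demo-vbus", dev);
}

static void demo_free_vbus(int vbus_gpio, void *dev)
{
	free_irq(gpio_to_irq(vbus_gpio), dev);	/* must mirror request_irq() */
}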
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index a6dfd21..170cbe8 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -927,7 +927,6 @@ static int dummy_udc_stop(struct usb_gadget *g,
dum->driver = NULL;
- dummy_pullup(&dum->gadget, 0);
return 0;
}
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 1cbba70..f52cb1a 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -712,7 +712,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
if (code == FUNCTIONFS_INTERFACE_REVMAP) {
struct ffs_function *func = ffs->func;
ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
- } else if (gadget->ops->ioctl) {
+ } else if (gadget && gadget->ops->ioctl) {
ret = gadget->ops->ioctl(gadget, code, value);
} else {
ret = -ENOTTY;
@@ -1382,6 +1382,7 @@ static void functionfs_unbind(struct ffs_data *ffs)
ffs->ep0req = NULL;
ffs->gadget = NULL;
ffs_data_put(ffs);
+ clear_bit(FFS_FL_BOUND, &ffs->flags);
}
}
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a371e96..cb8c162 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2189,7 +2189,7 @@ unknown_cmnd:
common->data_size_from_cmnd = 0;
sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
reply = check_command(common, common->cmnd_size,
- DATA_DIR_UNKNOWN, 0xff, 0, unknown);
+ DATA_DIR_UNKNOWN, ~0, 0, unknown);
if (reply == 0) {
common->curlun->sense_data = SS_INVALID_COMMAND;
reply = -EINVAL;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 7b1cf18..5234365 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -500,6 +500,7 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
if (buf) {
memcpy(req->buf, buf, n);
req->complete = rndis_response_complete;
+ req->context = rndis;
rndis_free_response(rndis->config, buf);
value = n;
}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 4fac569..a896d73 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -2579,7 +2579,7 @@ static int do_scsi_command(struct fsg_dev *fsg)
fsg->data_size_from_cmnd = 0;
sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
if ((reply = check_command(fsg, fsg->cmnd_size,
- DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
+ DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
fsg->curlun->sense_data = SS_INVALID_COMMAND;
reply = -EINVAL;
}
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 5f94e79..55abfb6 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -730,7 +730,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
: (1 << (ep_index(ep)));
/* check if the pipe is empty */
- if (!(list_empty(&ep->queue))) {
+ if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) {
/* Add td to the end */
struct fsl_req *lastreq;
lastreq = list_entry(ep->queue.prev, struct fsl_req, queue);
@@ -918,10 +918,6 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
return -ENOMEM;
}
- /* Update ep0 state */
- if ((ep_index(ep) == 0))
- udc->ep0_state = DATA_STATE_XMIT;
-
/* irq handler advances the queue */
if (req != NULL)
list_add_tail(&req->queue, &ep->queue);
@@ -1279,7 +1275,8 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
udc->ep0_dir = USB_DIR_OUT;
ep = &udc->eps[0];
- udc->ep0_state = WAIT_FOR_OUT_STATUS;
+ if (udc->ep0_state != DATA_STATE_XMIT)
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
req->ep = ep;
req->req.length = 0;
@@ -1384,6 +1381,9 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
list_add_tail(&req->queue, &ep->queue);
udc->ep0_state = DATA_STATE_XMIT;
+ if (ep0_prime_status(udc, EP_DIR_OUT))
+ ep0stall(udc);
+
return;
stall:
ep0stall(udc);
@@ -1492,6 +1492,14 @@ static void setup_received_irq(struct fsl_udc *udc,
spin_lock(&udc->lock);
udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
? DATA_STATE_XMIT : DATA_STATE_RECV;
+ /*
+ * If the data stage is IN, send status prime immediately.
+ * See the USB 2.0 spec, section 8.5.3.3, for details.
+ */
+ if (udc->ep0_state == DATA_STATE_XMIT)
+ if (ep0_prime_status(udc, EP_DIR_OUT))
+ ep0stall(udc);
+
} else {
/* No data phase, IN status from gadget */
udc->ep0_dir = USB_DIR_IN;
@@ -1520,9 +1528,8 @@ static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
switch (udc->ep0_state) {
case DATA_STATE_XMIT:
- /* receive status phase */
- if (ep0_prime_status(udc, EP_DIR_OUT))
- ep0stall(udc);
+ /* already primed at setup_received_irq */
+ udc->ep0_state = WAIT_FOR_OUT_STATUS;
break;
case DATA_STATE_RECV:
/* send status phase */
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c
index 331cd67..a85eaf4 100644
--- a/drivers/usb/gadget/g_ffs.c
+++ b/drivers/usb/gadget/g_ffs.c
@@ -161,7 +161,7 @@ static struct usb_composite_driver gfs_driver = {
static struct ffs_data *gfs_ffs_data;
static unsigned long gfs_registered;
-static int gfs_init(void)
+static int __init gfs_init(void)
{
ENTER();
@@ -169,7 +169,7 @@ static int gfs_init(void)
}
module_init(gfs_init);
-static void gfs_exit(void)
+static void __exit gfs_exit(void)
{
ENTER();
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 69295ba..105b206 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -340,7 +340,7 @@ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
/* currently we allocate TX FIFOs for all possible endpoints,
* and assume that they are all the same size. */
- for (ep = 0; ep <= 15; ep++) {
+ for (ep = 1; ep <= 15; ep++) {
val = addr;
val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT;
addr += size;
@@ -741,7 +741,7 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg,
/* write size / packets */
writel(epsize, hsotg->regs + epsize_reg);
- if (using_dma(hsotg)) {
+ if (using_dma(hsotg) && !continuing) {
unsigned int dma_reg;
/* write DMA address to control register, buffer already
@@ -1696,10 +1696,12 @@ static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg,
reg |= mpsval;
writel(reg, regs + S3C_DIEPCTL(ep));
- reg = readl(regs + S3C_DOEPCTL(ep));
- reg &= ~S3C_DxEPCTL_MPS_MASK;
- reg |= mpsval;
- writel(reg, regs + S3C_DOEPCTL(ep));
+ if (ep) {
+ reg = readl(regs + S3C_DOEPCTL(ep));
+ reg &= ~S3C_DxEPCTL_MPS_MASK;
+ reg |= mpsval;
+ writel(reg, regs + S3C_DOEPCTL(ep));
+ }
return;
@@ -1919,7 +1921,8 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx,
ints & S3C_DIEPMSK_TxFIFOEmpty) {
dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
__func__, idx);
- s3c_hsotg_trytx(hsotg, hs_ep);
+ if (!using_dma(hsotg))
+ s3c_hsotg_trytx(hsotg, hs_ep);
}
}
}
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 56da49f..e5e44f8 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -263,9 +263,9 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
if (udc_is_newstyle(udc)) {
udc->driver->disconnect(udc->gadget);
+ usb_gadget_disconnect(udc->gadget);
udc->driver->unbind(udc->gadget);
usb_gadget_udc_stop(udc->gadget, udc->driver);
- usb_gadget_disconnect(udc->gadget);
} else {
usb_gadget_stop(udc->gadget, udc->driver);
}
@@ -411,9 +411,13 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
if (sysfs_streq(buf, "connect")) {
+ if (udc_is_newstyle(udc))
+ usb_gadget_udc_start(udc->gadget, udc->driver);
usb_gadget_connect(udc->gadget);
} else if (sysfs_streq(buf, "disconnect")) {
usb_gadget_disconnect(udc->gadget);
+ if (udc_is_newstyle(udc))
+ usb_gadget_udc_stop(udc->gadget, udc->driver);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
return -EINVAL;
diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h
index bc78c60..ca4e03a 100644
--- a/drivers/usb/gadget/uvc.h
+++ b/drivers/usb/gadget/uvc.h
@@ -28,7 +28,7 @@
struct uvc_request_data
{
- unsigned int length;
+ __s32 length;
__u8 data[60];
};
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index d776adb..0cdf89d 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -543,11 +543,11 @@ done:
return ret;
}
+/* called with queue->irqlock held. */
static struct uvc_buffer *
uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
{
struct uvc_buffer *nextbuf;
- unsigned long flags;
if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
buf->buf.length != buf->buf.bytesused) {
@@ -556,14 +556,12 @@ uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf)
return buf;
}
- spin_lock_irqsave(&queue->irqlock, flags);
list_del(&buf->queue);
if (!list_empty(&queue->irqqueue))
nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
else
nextbuf = NULL;
- spin_unlock_irqrestore(&queue->irqlock, flags);
buf->buf.sequence = queue->sequence++;
do_gettimeofday(&buf->buf.timestamp);
diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c
index f6e083b..54d7ca5 100644
--- a/drivers/usb/gadget/uvc_v4l2.c
+++ b/drivers/usb/gadget/uvc_v4l2.c
@@ -39,7 +39,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
if (data->length < 0)
return usb_ep_set_halt(cdev->gadget->ep0);
- req->length = min(uvc->event_length, data->length);
+ req->length = min_t(unsigned int, uvc->event_length, data->length);
req->zero = data->length < uvc->event_length;
req->dma = DMA_ADDR_INVALID;
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 3e73451..d0a84bd 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -218,6 +218,9 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
u32 portsc;
struct usb_hcd *hcd = ehci_to_hcd(ehci);
void __iomem *non_ehci = hcd->regs;
+ struct fsl_usb2_platform_data *pdata;
+
+ pdata = hcd->self.controller->platform_data;
portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]);
portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW);
@@ -234,7 +237,9 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
/* fall through */
case FSL_USB2_PHY_UTMI:
/* enable UTMI PHY */
- setbits32(non_ehci + FSL_SOC_USB_CTRL, CTRL_UTMI_PHY_EN);
+ if (pdata->have_sysif_regs)
+ setbits32(non_ehci + FSL_SOC_USB_CTRL,
+ CTRL_UTMI_PHY_EN);
portsc |= PORT_PTS_UTMI;
break;
case FSL_USB2_PHY_NONE:
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 806cc95..4a3bc5b 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -858,8 +858,13 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
goto dead;
}
+ /*
+ * We don't use STS_FLR, but some controllers don't like it to
+ * remain on, so mask it out along with the other status bits.
+ */
+ masked_status = status & (INTR_MASK | STS_FLR);
+
/* Shared IRQ? */
- masked_status = status & INTR_MASK;
if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
spin_unlock(&ehci->lock);
return IRQ_NONE;
@@ -910,7 +915,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
pcd_status = status;
/* resume root hub? */
- if (!(cmd & CMD_RUN))
+ if (ehci->rh_state == EHCI_RH_SUSPENDED)
usb_hcd_resume_root_hub(hcd);
/* get per-port change detect bits */
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index bba9850..5c78f9e 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -42,6 +42,7 @@
#include <plat/usb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@@ -191,6 +192,19 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
}
}
+ if (pdata->phy_reset) {
+ if (gpio_is_valid(pdata->reset_gpio_port[0]))
+ gpio_request_one(pdata->reset_gpio_port[0],
+ GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+ if (gpio_is_valid(pdata->reset_gpio_port[1]))
+ gpio_request_one(pdata->reset_gpio_port[1],
+ GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+ /* Hold the PHY in RESET for enough time till DIR is high */
+ udelay(10);
+ }
+
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -237,6 +251,19 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
/* root ports should always stay powered */
ehci_port_power(omap_ehci, 1);
+ if (pdata->phy_reset) {
+ /* Hold the PHY in RESET for enough time till
+ * PHY is settled and ready
+ */
+ udelay(10);
+
+ if (gpio_is_valid(pdata->reset_gpio_port[0]))
+ gpio_set_value(pdata->reset_gpio_port[0], 1);
+
+ if (gpio_is_valid(pdata->reset_gpio_port[1]))
+ gpio_set_value(pdata->reset_gpio_port[1], 1);
+ }
+
return 0;
err_add_hcd:
@@ -259,8 +286,9 @@ err_io:
*/
static int ehci_hcd_omap_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd_omap_platform_data *pdata = dev->platform_data;
usb_remove_hcd(hcd);
disable_put_regulator(dev->platform_data);
@@ -269,6 +297,13 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ if (pdata->phy_reset) {
+ if (gpio_is_valid(pdata->reset_gpio_port[0]))
+ gpio_free(pdata->reset_gpio_port[0]);
+
+ if (gpio_is_valid(pdata->reset_gpio_port[1]))
+ gpio_free(pdata->reset_gpio_port[1]);
+ }
return 0;
}
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 01bb7241d..fe8dc06 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,6 +144,14 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
hcd->has_tt = 1;
tdi_reset(ehci);
}
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
+ /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
+ if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
+ ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
+ hcd->broken_pci_sleep = 1;
+ device_set_wakeup_capable(&pdev->dev, false);
+ }
+ }
break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
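The ehci-pci hunk above keys a workaround off the board (PCI subsystem vendor) plus the controller device ID and records it in a flag that the suspend path added in hcd-pci.c checks later. A sketch of that quirk-matching pattern; the helper name is illustrative and the IDs mirror the hunk:

#include <linux/pci.h>
#include <linux/usb/hcd.h>
#include <linux/pm_wakeup.h>

static void demo_apply_sleep_quirk(struct usb_hcd *hcd, struct pci_dev *pdev)
{
	if (pdev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
		return;

	/* EHCI #1 / #2 on the 6 Series / C200 Series chipset (from the hunk) */
	if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
		hcd->broken_pci_sleep = 1;	/* suspend path keeps it in D0 */
		device_set_wakeup_capable(&pdev->dev, false);
	}
}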
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 73544bd..f214a80 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -24,6 +24,7 @@
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
#include <mach/usb_phy.h>
#include <mach/iomap.h>
@@ -37,9 +38,7 @@ struct tegra_ehci_hcd {
struct clk *emc_clk;
struct usb_phy *transceiver;
int host_resumed;
- int bus_suspended;
int port_resuming;
- int power_down_on_bus_suspend;
enum tegra_usb_phy_port_speed port_speed;
};
@@ -273,120 +272,6 @@ static void tegra_ehci_restart(struct usb_hcd *hcd)
up_write(&ehci_cf_port_reset_rwsem);
}
-static int tegra_usb_suspend(struct usb_hcd *hcd)
-{
- struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
- struct ehci_regs __iomem *hw = tegra->ehci->regs;
- unsigned long flags;
-
- spin_lock_irqsave(&tegra->ehci->lock, flags);
-
- tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
- ehci_halt(tegra->ehci);
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
-
- spin_unlock_irqrestore(&tegra->ehci->lock, flags);
-
- tegra_ehci_power_down(hcd);
- return 0;
-}
-
-static int tegra_usb_resume(struct usb_hcd *hcd)
-{
- struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- struct ehci_regs __iomem *hw = ehci->regs;
- unsigned long val;
-
- set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
- tegra_ehci_power_up(hcd);
-
- if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
- /* Wait for the phy to detect new devices
- * before we restart the controller */
- msleep(10);
- goto restart;
- }
-
- /* Force the phy to keep data lines in suspend state */
- tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
-
- /* Enable host mode */
- tdi_reset(ehci);
-
- /* Enable Port Power */
- val = readl(&hw->port_status[0]);
- val |= PORT_POWER;
- writel(val, &hw->port_status[0]);
- udelay(10);
-
- /* Check if the phy resume from LP0. When the phy resume from LP0
- * USB register will be reset. */
- if (!readl(&hw->async_next)) {
- /* Program the field PTC based on the saved speed mode */
- val = readl(&hw->port_status[0]);
- val &= ~PORT_TEST(~0);
- if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
- val |= PORT_TEST_FORCE;
- else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
- val |= PORT_TEST(6);
- else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
- val |= PORT_TEST(7);
- writel(val, &hw->port_status[0]);
- udelay(10);
-
- /* Disable test mode by setting PTC field to NORMAL_OP */
- val = readl(&hw->port_status[0]);
- val &= ~PORT_TEST(~0);
- writel(val, &hw->port_status[0]);
- udelay(10);
- }
-
- /* Poll until CCS is enabled */
- if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
- PORT_CONNECT, 2000)) {
- pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
- goto restart;
- }
-
- /* Poll until PE is enabled */
- if (handshake(ehci, &hw->port_status[0], PORT_PE,
- PORT_PE, 2000)) {
- pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
- goto restart;
- }
-
- /* Clear the PCI status, to avoid an interrupt taken upon resume */
- val = readl(&hw->status);
- val |= STS_PCD;
- writel(val, &hw->status);
-
- /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
- val = readl(&hw->port_status[0]);
- if ((val & PORT_POWER) && (val & PORT_PE)) {
- val |= PORT_SUSPEND;
- writel(val, &hw->port_status[0]);
-
- /* Wait until port suspend completes */
- if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
- PORT_SUSPEND, 1000)) {
- pr_err("%s: timeout waiting for PORT_SUSPEND\n",
- __func__);
- goto restart;
- }
- }
-
- tegra_ehci_phy_restore_end(tegra->phy);
- return 0;
-
-restart:
- if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
- tegra_ehci_phy_restore_end(tegra->phy);
-
- tegra_ehci_restart(hcd);
- return 0;
-}
-
static void tegra_ehci_shutdown(struct usb_hcd *hcd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
@@ -434,36 +319,6 @@ static int tegra_ehci_setup(struct usb_hcd *hcd)
return retval;
}
-#ifdef CONFIG_PM
-static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
-{
- struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
- int error_status = 0;
-
- error_status = ehci_bus_suspend(hcd);
- if (!error_status && tegra->power_down_on_bus_suspend) {
- tegra_usb_suspend(hcd);
- tegra->bus_suspended = 1;
- }
-
- return error_status;
-}
-
-static int tegra_ehci_bus_resume(struct usb_hcd *hcd)
-{
- struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
-
- if (tegra->bus_suspended && tegra->power_down_on_bus_suspend) {
- tegra_usb_resume(hcd);
- tegra->bus_suspended = 0;
- }
-
- tegra_usb_phy_preresume(tegra->phy);
- tegra->port_resuming = 1;
- return ehci_bus_resume(hcd);
-}
-#endif
-
struct temp_buffer {
void *kmalloc_ptr;
void *old_xfer_buffer;
@@ -574,8 +429,8 @@ static const struct hc_driver tegra_ehci_hc_driver = {
.hub_control = tegra_ehci_hub_control,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
#ifdef CONFIG_PM
- .bus_suspend = tegra_ehci_bus_suspend,
- .bus_resume = tegra_ehci_bus_resume,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
#endif
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
@@ -603,11 +458,187 @@ static int setup_vbus_gpio(struct platform_device *pdev)
dev_err(&pdev->dev, "can't enable vbus\n");
return err;
}
- gpio_set_value(gpio, 1);
return err;
}
+#ifdef CONFIG_PM
+
+static int controller_suspend(struct device *dev)
+{
+ struct tegra_ehci_hcd *tegra =
+ platform_get_drvdata(to_platform_device(dev));
+ struct ehci_hcd *ehci = tegra->ehci;
+ struct usb_hcd *hcd = ehci_to_hcd(ehci);
+ struct ehci_regs __iomem *hw = ehci->regs;
+ unsigned long flags;
+
+ if (time_before(jiffies, ehci->next_statechange))
+ msleep(10);
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3;
+ ehci_halt(ehci);
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ tegra_ehci_power_down(hcd);
+ return 0;
+}
+
+static int controller_resume(struct device *dev)
+{
+ struct tegra_ehci_hcd *tegra =
+ platform_get_drvdata(to_platform_device(dev));
+ struct ehci_hcd *ehci = tegra->ehci;
+ struct usb_hcd *hcd = ehci_to_hcd(ehci);
+ struct ehci_regs __iomem *hw = ehci->regs;
+ unsigned long val;
+
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ tegra_ehci_power_up(hcd);
+
+ if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) {
+ /* Wait for the phy to detect new devices
+ * before we restart the controller */
+ msleep(10);
+ goto restart;
+ }
+
+ /* Force the phy to keep data lines in suspend state */
+ tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed);
+
+ /* Enable host mode */
+ tdi_reset(ehci);
+
+ /* Enable Port Power */
+ val = readl(&hw->port_status[0]);
+ val |= PORT_POWER;
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+
+ /* Check if the phy resume from LP0. When the phy resume from LP0
+ * USB register will be reset. */
+ if (!readl(&hw->async_next)) {
+ /* Program the field PTC based on the saved speed mode */
+ val = readl(&hw->port_status[0]);
+ val &= ~PORT_TEST(~0);
+ if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH)
+ val |= PORT_TEST_FORCE;
+ else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL)
+ val |= PORT_TEST(6);
+ else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
+ val |= PORT_TEST(7);
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+
+ /* Disable test mode by setting PTC field to NORMAL_OP */
+ val = readl(&hw->port_status[0]);
+ val &= ~PORT_TEST(~0);
+ writel(val, &hw->port_status[0]);
+ udelay(10);
+ }
+
+ /* Poll until CCS is enabled */
+ if (handshake(ehci, &hw->port_status[0], PORT_CONNECT,
+ PORT_CONNECT, 2000)) {
+ pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__);
+ goto restart;
+ }
+
+ /* Poll until PE is enabled */
+ if (handshake(ehci, &hw->port_status[0], PORT_PE,
+ PORT_PE, 2000)) {
+ pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__);
+ goto restart;
+ }
+
+ /* Clear the PCI status, to avoid an interrupt taken upon resume */
+ val = readl(&hw->status);
+ val |= STS_PCD;
+ writel(val, &hw->status);
+
+ /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */
+ val = readl(&hw->port_status[0]);
+ if ((val & PORT_POWER) && (val & PORT_PE)) {
+ val |= PORT_SUSPEND;
+ writel(val, &hw->port_status[0]);
+
+ /* Wait until port suspend completes */
+ if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND,
+ PORT_SUSPEND, 1000)) {
+ pr_err("%s: timeout waiting for PORT_SUSPEND\n",
+ __func__);
+ goto restart;
+ }
+ }
+
+ tegra_ehci_phy_restore_end(tegra->phy);
+ goto done;
+
+ restart:
+ if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH)
+ tegra_ehci_phy_restore_end(tegra->phy);
+
+ tegra_ehci_restart(hcd);
+
+ done:
+ tegra_usb_phy_preresume(tegra->phy);
+ tegra->port_resuming = 1;
+ return 0;
+}
+
+static int tegra_ehci_suspend(struct device *dev)
+{
+ struct tegra_ehci_hcd *tegra =
+ platform_get_drvdata(to_platform_device(dev));
+ struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
+ int rc = 0;
+
+ /*
+ * When system sleep is supported and USB controller wakeup is
+ * implemented: If the controller is runtime-suspended and the
+ * wakeup setting needs to be changed, call pm_runtime_resume().
+ */
+ if (HCD_HW_ACCESSIBLE(hcd))
+ rc = controller_suspend(dev);
+ return rc;
+}
+
+static int tegra_ehci_resume(struct device *dev)
+{
+ int rc;
+
+ rc = controller_resume(dev);
+ if (rc == 0) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+ return rc;
+}
+
+static int tegra_ehci_runtime_suspend(struct device *dev)
+{
+ return controller_suspend(dev);
+}
+
+static int tegra_ehci_runtime_resume(struct device *dev)
+{
+ return controller_resume(dev);
+}
+
+static const struct dev_pm_ops tegra_ehci_pm_ops = {
+ .suspend = tegra_ehci_suspend,
+ .resume = tegra_ehci_resume,
+ .runtime_suspend = tegra_ehci_runtime_suspend,
+ .runtime_resume = tegra_ehci_runtime_resume,
+};
+
+#endif
+
static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
static int tegra_ehci_probe(struct platform_device *pdev)
@@ -722,7 +753,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
}
tegra->host_resumed = 1;
- tegra->power_down_on_bus_suspend = pdata->power_down_on_bus_suspend;
tegra->ehci = hcd_to_ehci(hcd);
irq = platform_get_irq(pdev, 0);
@@ -731,7 +761,6 @@ static int tegra_ehci_probe(struct platform_device *pdev)
err = -ENODEV;
goto fail;
}
- set_irq_flags(irq, IRQF_VALID);
#ifdef CONFIG_USB_OTG_UTILS
if (pdata->operating_mode == TEGRA_USB_OTG) {
@@ -747,6 +776,14 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail;
}
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ /* Don't skip the pm_runtime_forbid call if wakeup isn't working */
+ /* if (!pdata->power_down_on_bus_suspend) */
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
return err;
fail:
@@ -773,33 +810,6 @@ fail_hcd:
return err;
}
-#ifdef CONFIG_PM
-static int tegra_ehci_resume(struct platform_device *pdev)
-{
- struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
-
- if (tegra->bus_suspended)
- return 0;
-
- return tegra_usb_resume(hcd);
-}
-
-static int tegra_ehci_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci);
-
- if (tegra->bus_suspended)
- return 0;
-
- if (time_before(jiffies, tegra->ehci->next_statechange))
- msleep(10);
-
- return tegra_usb_suspend(hcd);
-}
-#endif
-
static int tegra_ehci_remove(struct platform_device *pdev)
{
struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev);
@@ -808,6 +818,10 @@ static int tegra_ehci_remove(struct platform_device *pdev)
if (tegra == NULL || hcd == NULL)
return -EINVAL;
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
#ifdef CONFIG_USB_OTG_UTILS
if (tegra->transceiver) {
otg_set_host(tegra->transceiver->otg, NULL);
@@ -848,13 +862,12 @@ static struct of_device_id tegra_ehci_of_match[] __devinitdata = {
static struct platform_driver tegra_ehci_driver = {
.probe = tegra_ehci_probe,
.remove = tegra_ehci_remove,
-#ifdef CONFIG_PM
- .suspend = tegra_ehci_suspend,
- .resume = tegra_ehci_resume,
-#endif
.shutdown = tegra_ehci_hcd_shutdown,
.driver = {
.name = "tegra-ehci",
.of_match_table = tegra_ehci_of_match,
+#ifdef CONFIG_PM
+ .pm = &tegra_ehci_pm_ops,
+#endif
}
};
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 09f597a..13ebeca 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -94,7 +94,7 @@ static void at91_stop_hc(struct platform_device *pdev)
/*-------------------------------------------------------------------------*/
-static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
+static void __devexit usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
@@ -108,7 +108,7 @@ static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *);
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
-static int usb_hcd_at91_probe(const struct hc_driver *driver,
+static int __devinit usb_hcd_at91_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
int retval;
@@ -203,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
* context, "rmmod" or something similar.
*
*/
-static void usb_hcd_at91_remove(struct usb_hcd *hcd,
+static void __devexit usb_hcd_at91_remove(struct usb_hcd *hcd,
struct platform_device *pdev)
{
usb_remove_hcd(hcd);
@@ -545,7 +545,7 @@ static int __devinit ohci_at91_of_init(struct platform_device *pdev)
/*-------------------------------------------------------------------------*/
-static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
+static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev)
{
struct at91_usbh_data *pdata;
int i;
@@ -620,7 +620,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
}
-static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
+static int __devexit ohci_hcd_at91_drv_remove(struct platform_device *pdev)
{
struct at91_usbh_data *pdata = pdev->dev.platform_data;
int i;
@@ -696,7 +696,7 @@ MODULE_ALIAS("platform:at91_ohci");
static struct platform_driver ohci_hcd_at91_driver = {
.probe = ohci_hcd_at91_drv_probe,
- .remove = ohci_hcd_at91_drv_remove,
+ .remove = __devexit_p(ohci_hcd_at91_drv_remove),
.shutdown = usb_hcd_platform_shutdown,
.suspend = ohci_hcd_at91_drv_suspend,
.resume = ohci_hcd_at91_drv_resume,
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 959145b..9dcb68f 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -423,7 +423,7 @@ alloc_sglist(int nents, int max, int vary)
unsigned i;
unsigned size = max;
- sg = kmalloc(nents * sizeof *sg, GFP_KERNEL);
+ sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL);
if (!sg)
return NULL;
sg_init_table(sg, nents);
@@ -904,6 +904,9 @@ test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
struct ctrl_ctx context;
int i;
+ if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen)
+ return -EOPNOTSUPP;
+
spin_lock_init(&context.lock);
context.dev = dev;
init_completion(&context.complete);
@@ -1981,8 +1984,6 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
/* queued control messaging */
case 10:
- if (param->sglen == 0)
- break;
retval = 0;
dev_info(&intf->dev,
"TEST 10: queue %d control calls, %d times\n",
@@ -2276,6 +2277,8 @@ usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (status < 0) {
WARNING(dev, "couldn't get endpoints, %d\n",
status);
+ kfree(dev->buf);
+ kfree(dev);
return status;
}
/* may find bulk or ISO pipes */
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 897edda..7020146 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -99,9 +99,7 @@ static void yurex_delete(struct kref *kref)
usb_put_dev(dev->udev);
if (dev->cntl_urb) {
usb_kill_urb(dev->cntl_urb);
- if (dev->cntl_req)
- usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
- dev->cntl_req, dev->cntl_urb->setup_dma);
+ kfree(dev->cntl_req);
if (dev->cntl_buffer)
usb_free_coherent(dev->udev, YUREX_BUF_SIZE,
dev->cntl_buffer, dev->cntl_urb->transfer_dma);
@@ -234,9 +232,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
}
/* allocate buffer for control req */
- dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE,
- GFP_KERNEL,
- &dev->cntl_urb->setup_dma);
+ dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL);
if (!dev->cntl_req) {
err("Could not allocate cntl_req");
goto error;
@@ -286,7 +282,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr),
dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
dev, 1);
- dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
retval = -EIO;
err("Could not submitting URB");
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 97ab975..768b4b5 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -386,7 +386,7 @@ static int davinci_musb_init(struct musb *musb)
usb_nop_xceiv_register();
musb->xceiv = usb_get_transceiver();
if (!musb->xceiv)
- return -ENODEV;
+ goto unregister;
musb->mregs += DAVINCI_BASE_OFFSET;
@@ -444,6 +444,7 @@ static int davinci_musb_init(struct musb *musb)
fail:
usb_put_transceiver(musb->xceiv);
+unregister:
usb_nop_xceiv_unregister();
return -ENODEV;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 0f8b829..66aaccf 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -137,6 +137,9 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
int i = 0;
u8 r;
u8 power;
+ int ret;
+
+ pm_runtime_get_sync(phy->io_dev);
/* Make sure the transceiver is not in low power mode */
power = musb_readb(addr, MUSB_POWER);
@@ -154,15 +157,22 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
& MUSB_ULPI_REG_CMPLT)) {
i++;
- if (i == 10000)
- return -ETIMEDOUT;
+ if (i == 10000) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
}
r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
r &= ~MUSB_ULPI_REG_CMPLT;
musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
- return musb_readb(addr, MUSB_ULPI_REG_DATA);
+ ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
+
+out:
+ pm_runtime_put(phy->io_dev);
+
+ return ret;
}
static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
@@ -171,6 +181,9 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
int i = 0;
u8 r = 0;
u8 power;
+ int ret = 0;
+
+ pm_runtime_get_sync(phy->io_dev);
/* Make sure the transceiver is not in low power mode */
power = musb_readb(addr, MUSB_POWER);
@@ -184,15 +197,20 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
& MUSB_ULPI_REG_CMPLT)) {
i++;
- if (i == 10000)
- return -ETIMEDOUT;
+ if (i == 10000) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
}
r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
r &= ~MUSB_ULPI_REG_CMPLT;
musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
- return 0;
+out:
+ pm_runtime_put(phy->io_dev);
+
+ return ret;
}
#else
#define musb_ulpi_read NULL
@@ -1904,14 +1922,17 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (!musb->isr) {
status = -ENODEV;
- goto fail3;
+ goto fail2;
}
if (!musb->xceiv->io_ops) {
+ musb->xceiv->io_dev = musb->controller;
musb->xceiv->io_priv = musb->mregs;
musb->xceiv->io_ops = &musb_ulpi_access;
}
+ pm_runtime_get_sync(musb->controller);
+
#ifndef CONFIG_MUSB_PIO_ONLY
if (use_dma && dev->dma_mask) {
struct dma_controller *c;
@@ -2023,6 +2044,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
goto fail5;
#endif
+ pm_runtime_put(musb->controller);
+
dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
({char *s;
switch (musb->board_mode) {
@@ -2047,6 +2070,9 @@ fail4:
musb_gadget_cleanup(musb);
fail3:
+ pm_runtime_put_sync(musb->controller);
+
+fail2:
if (musb->irq_wake)
device_init_wakeup(dev, 0);
musb_platform_exit(musb);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 93de517..f4a40f0 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -449,7 +449,7 @@ struct musb {
* We added this flag to forcefully disable double
* buffering until we get it working.
*/
- unsigned double_buffer_not_ok:1 __deprecated;
+ unsigned double_buffer_not_ok:1;
struct musb_hdrc_config *config;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 79cb0af..ef8d744 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2098,7 +2098,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
}
/* turn off DMA requests, discard state, stop polling ... */
- if (is_in) {
+ if (ep->epnum && is_in) {
/* giveback saves bulk toggle */
csr = musb_h_flush_rxfifo(ep, 0);
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2ae0bb3..c7785e8 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -282,7 +282,8 @@ static void musb_otg_notifier_work(struct work_struct *data_notifier_work)
static int omap2430_musb_init(struct musb *musb)
{
- u32 l, status = 0;
+ u32 l;
+ int status = 0;
struct device *dev = musb->controller;
struct musb_hdrc_platform_data *plat = dev->platform_data;
struct omap_musb_board_data *data = plat->board_data;
@@ -301,7 +302,7 @@ static int omap2430_musb_init(struct musb *musb)
status = pm_runtime_get_sync(dev);
if (status < 0) {
- dev_err(dev, "pm_runtime_get_sync FAILED");
+ dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
goto err1;
}
@@ -333,6 +334,7 @@ static int omap2430_musb_init(struct musb *musb)
setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
+ pm_runtime_put_noidle(musb->controller);
return 0;
err1:
@@ -452,14 +454,14 @@ static int __devinit omap2430_probe(struct platform_device *pdev)
goto err2;
}
+ pm_runtime_enable(&pdev->dev);
+
ret = platform_device_add(musb);
if (ret) {
dev_err(&pdev->dev, "failed to register musb device\n");
goto err2;
}
- pm_runtime_enable(&pdev->dev);
-
return 0;
err2:
@@ -478,7 +480,6 @@ static int __devexit omap2430_remove(struct platform_device *pdev)
platform_device_del(glue->musb);
platform_device_put(glue->musb);
- pm_runtime_put(&pdev->dev);
kfree(glue);
return 0;
@@ -491,11 +492,13 @@ static int omap2430_runtime_suspend(struct device *dev)
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
- musb->context.otg_interfsel = musb_readl(musb->mregs,
- OTG_INTERFSEL);
+ if (musb) {
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
- omap2430_low_level_exit(musb);
- usb_phy_set_suspend(musb->xceiv, 1);
+ omap2430_low_level_exit(musb);
+ usb_phy_set_suspend(musb->xceiv, 1);
+ }
return 0;
}
@@ -505,11 +508,13 @@ static int omap2430_runtime_resume(struct device *dev)
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
- omap2430_low_level_init(musb);
- musb_writel(musb->mregs, OTG_INTERFSEL,
- musb->context.otg_interfsel);
+ if (musb) {
+ omap2430_low_level_init(musb);
+ musb_writel(musb->mregs, OTG_INTERFSEL,
+ musb->context.otg_interfsel);
- usb_phy_set_suspend(musb->xceiv, 0);
+ usb_phy_set_suspend(musb->xceiv, 0);
+ }
return 0;
}
diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c
index 3ece43a..a0a2178 100644
--- a/drivers/usb/otg/gpio_vbus.c
+++ b/drivers/usb/otg/gpio_vbus.c
@@ -96,7 +96,7 @@ static void gpio_vbus_work(struct work_struct *work)
struct gpio_vbus_data *gpio_vbus =
container_of(work, struct gpio_vbus_data, work);
struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data;
- int gpio;
+ int gpio, status;
if (!gpio_vbus->phy.otg->gadget)
return;
@@ -108,7 +108,9 @@ static void gpio_vbus_work(struct work_struct *work)
*/
gpio = pdata->gpio_pullup;
if (is_vbus_powered(pdata)) {
+ status = USB_EVENT_VBUS;
gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL;
+ gpio_vbus->phy.last_event = status;
usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget);
/* drawing a "unit load" is *always* OK, except for OTG */
@@ -117,6 +119,9 @@ static void gpio_vbus_work(struct work_struct *work)
/* optionally enable D+ pullup */
if (gpio_is_valid(gpio))
gpio_set_value(gpio, !pdata->gpio_pullup_inverted);
+
+ atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
+ status, gpio_vbus->phy.otg->gadget);
} else {
/* optionally disable D+ pullup */
if (gpio_is_valid(gpio))
@@ -125,7 +130,12 @@ static void gpio_vbus_work(struct work_struct *work)
set_vbus_draw(gpio_vbus, 0);
usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget);
+ status = USB_EVENT_NONE;
gpio_vbus->phy.state = OTG_STATE_B_IDLE;
+ gpio_vbus->phy.last_event = status;
+
+ atomic_notifier_call_chain(&gpio_vbus->phy.notifier,
+ status, gpio_vbus->phy.otg->gadget);
}
}
@@ -287,6 +297,9 @@ static int __init gpio_vbus_probe(struct platform_device *pdev)
irq, err);
goto err_irq;
}
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&gpio_vbus->phy.notifier);
+
INIT_WORK(&gpio_vbus->work, gpio_vbus_work);
gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw");
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 0310e2d..ec30f95 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -287,7 +287,8 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
/* Issue the request, attempting to read 'size' bytes */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
request, REQTYPE_DEVICE_TO_HOST, 0x0000,
- port_priv->bInterfaceNumber, buf, size, 300);
+ port_priv->bInterfaceNumber, buf, size,
+ USB_CTRL_GET_TIMEOUT);
/* Convert data into an array of integers */
for (i = 0; i < length; i++)
@@ -340,12 +341,14 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
request, REQTYPE_HOST_TO_DEVICE, 0x0000,
- port_priv->bInterfaceNumber, buf, size, 300);
+ port_priv->bInterfaceNumber, buf, size,
+ USB_CTRL_SET_TIMEOUT);
} else {
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
request, REQTYPE_HOST_TO_DEVICE, data[0],
- port_priv->bInterfaceNumber, NULL, 0, 300);
+ port_priv->bInterfaceNumber, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
}
kfree(buf);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index fdd5aa2..8c8bf80 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -221,7 +221,7 @@ static const struct sierra_iface_info typeB_interface_list = {
};
/* 'blacklist' of interfaces not served by this driver */
-static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
+static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 };
static const struct sierra_iface_info direct_ip_interface_blacklist = {
.infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
.ifaceinfo = direct_ip_non_serial_ifaces,
@@ -289,7 +289,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */
{ USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */
{ USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */
- { USB_DEVICE(0x1199, 0x68A2) }, /* Sierra Wireless MC7710 */
/* Sierra Wireless C885 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
/* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */
@@ -299,6 +298,9 @@ static const struct usb_device_id id_table[] = {
/* Sierra Wireless HSPA Non-Composite Device */
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)},
{ USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */
+ { USB_DEVICE(0x1199, 0x68A2), /* Sierra Wireless MC77xx in QMI mode */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 66797e9..810c90a 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -645,7 +645,8 @@ void hwarc_neep_cb(struct urb *urb)
dev_err(dev, "NEEP: URB error %d\n", urb->status);
}
result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result < 0) {
+ if (result < 0 && result != -ENODEV && result != -EPERM) {
+ /* ignoring unrecoverable errors */
dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n",
result);
goto error;
diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c
index a269937..8cb71bb 100644
--- a/drivers/uwb/neh.c
+++ b/drivers/uwb/neh.c
@@ -107,6 +107,7 @@ struct uwb_rc_neh {
u8 evt_type;
__le16 evt;
u8 context;
+ u8 completed;
uwb_rc_cmd_cb_f cb;
void *arg;
@@ -409,6 +410,7 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
struct device *dev = &rc->uwb_dev.dev;
struct uwb_rc_neh *neh;
struct uwb_rceb *notif;
+ unsigned long flags;
if (rceb->bEventContext == 0) {
notif = kmalloc(size, GFP_ATOMIC);
@@ -422,7 +424,11 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size
} else {
neh = uwb_rc_neh_lookup(rc, rceb);
if (neh) {
- del_timer_sync(&neh->timer);
+ spin_lock_irqsave(&rc->neh_lock, flags);
+ /* to guard against a timeout */
+ neh->completed = 1;
+ del_timer(&neh->timer);
+ spin_unlock_irqrestore(&rc->neh_lock, flags);
uwb_rc_neh_cb(neh, rceb, size);
} else
dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n",
@@ -568,6 +574,10 @@ static void uwb_rc_neh_timer(unsigned long arg)
unsigned long flags;
spin_lock_irqsave(&rc->neh_lock, flags);
+ if (neh->completed) {
+ spin_unlock_irqrestore(&rc->neh_lock, flags);
+ return;
+ }
if (neh->context)
__uwb_rc_neh_rm(rc, neh);
else
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f0da2c3..5c17010 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -24,6 +24,7 @@
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
+#include <linux/if_vlan.h>
#include <net/sock.h>
@@ -238,7 +239,7 @@ static void handle_tx(struct vhost_net *net)
vq->heads[vq->upend_idx].len = len;
ubuf->callback = vhost_zerocopy_callback;
- ubuf->arg = vq->ubufs;
+ ubuf->ctx = vq->ubufs;
ubuf->desc = vq->upend_idx;
msg.msg_control = ubuf;
msg.msg_controllen = sizeof(ubuf);
@@ -283,8 +284,12 @@ static int peek_head_len(struct sock *sk)
spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
head = skb_peek(&sk->sk_receive_queue);
- if (likely(head))
+ if (likely(head)) {
len = head->len;
+ if (vlan_tx_tag_present(head))
+ len += VLAN_HLEN;
+ }
+
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
return len;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 947f00d..51e4c1e 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1598,10 +1598,9 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
kfree(ubufs);
}
-void vhost_zerocopy_callback(void *arg)
+void vhost_zerocopy_callback(struct ubuf_info *ubuf)
{
- struct ubuf_info *ubuf = arg;
- struct vhost_ubuf_ref *ubufs = ubuf->arg;
+ struct vhost_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
/* set len = 1 to mark this desc buffers done DMA */
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 8dcf4cc..8de1fd5 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -188,7 +188,7 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len);
-void vhost_zerocopy_callback(void *arg);
+void vhost_zerocopy_callback(struct ubuf_info *);
int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
#define vq_err(vq, fmt, ...) do { \
diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c
index 86922ac..353c02f 100644
--- a/drivers/video/bfin-lq035q1-fb.c
+++ b/drivers/video/bfin-lq035q1-fb.c
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/fb.h>
+#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c
index 6468a29..39571f9 100644
--- a/drivers/video/console/sticore.c
+++ b/drivers/video/console/sticore.c
@@ -22,7 +22,9 @@
#include <linux/font.h>
#include <asm/hardware.h>
+#include <asm/page.h>
#include <asm/parisc-device.h>
+#include <asm/pdc.h>
#include <asm/cacheflush.h>
#include <asm/grfioctl.h>
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 26e83d7..b0e2a42 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns
struct uvesafb_task *utask;
struct uvesafb_ktask *task;
- if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
+ if (!capable(CAP_SYS_ADMIN))
return;
if (msg->seq >= UVESAFB_TASKS_MAX)
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index cb4529c..b7f5173 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -365,7 +365,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
struct fb_info *fb_info;
int fb_size;
int val;
- int ret;
+ int ret = 0;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL) {
@@ -458,26 +458,31 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
xenfb_init_shared_page(info, fb_info);
ret = xenfb_connect_backend(dev, info);
- if (ret < 0)
- goto error;
+ if (ret < 0) {
+ xenbus_dev_fatal(dev, ret, "xenfb_connect_backend");
+ goto error_fb;
+ }
ret = register_framebuffer(fb_info);
if (ret) {
- fb_deferred_io_cleanup(fb_info);
- fb_dealloc_cmap(&fb_info->cmap);
- framebuffer_release(fb_info);
xenbus_dev_fatal(dev, ret, "register_framebuffer");
- goto error;
+ goto error_fb;
}
info->fb_info = fb_info;
xenfb_make_preferred_console();
return 0;
- error_nomem:
- ret = -ENOMEM;
- xenbus_dev_fatal(dev, ret, "allocating device memory");
- error:
+error_fb:
+ fb_deferred_io_cleanup(fb_info);
+ fb_dealloc_cmap(&fb_info->cmap);
+ framebuffer_release(fb_info);
+error_nomem:
+ if (!ret) {
+ ret = -ENOMEM;
+ xenbus_dev_fatal(dev, ret, "allocating device memory");
+ }
+error:
xenfb_remove(dev);
return ret;
}
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index cbc7cee..9f13b89 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -435,16 +435,16 @@ static void hpwdt_start(void)
{
reload = SECS_TO_TICKS(soft_margin);
iowrite16(reload, hpwdt_timer_reg);
- iowrite16(0x85, hpwdt_timer_con);
+ iowrite8(0x85, hpwdt_timer_con);
}
static void hpwdt_stop(void)
{
unsigned long data;
- data = ioread16(hpwdt_timer_con);
+ data = ioread8(hpwdt_timer_con);
data &= 0xFE;
- iowrite16(data, hpwdt_timer_con);
+ iowrite8(data, hpwdt_timer_con);
}
static void hpwdt_ping(void)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 9424313..ea20c51 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -183,15 +183,17 @@ config XEN_ACPI_PROCESSOR
depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
default m
help
- This ACPI processor uploads Power Management information to the Xen hypervisor.
-
- To do that the driver parses the Power Management data and uploads said
- information to the Xen hypervisor. Then the Xen hypervisor can select the
- proper Cx and Pxx states. It also registers itslef as the SMM so that
- other drivers (such as ACPI cpufreq scaling driver) will not load.
-
- To compile this driver as a module, choose M here: the
- module will be called xen_acpi_processor If you do not know what to choose,
- select M here. If the CPUFREQ drivers are built in, select Y here.
+ This ACPI processor uploads Power Management information to the Xen
+ hypervisor.
+
+ To do that the driver parses the Power Management data and uploads
+ said information to the Xen hypervisor. Then the Xen hypervisor can
+ select the proper Cx and Pxx states. It also registers itself as the
+ SMM so that other drivers (such as ACPI cpufreq scaling driver) will
+ not load.
+
+ To compile this driver as a module, choose M here: the module will be
+ called xen_acpi_processor. If you do not know what to choose, select
+ M here. If the CPUFREQ drivers are built in, select Y here.
endmenu
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 4b33acd..0a8a17c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -274,7 +274,7 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn)
static bool pirq_check_eoi_map(unsigned irq)
{
- return test_bit(irq, pirq_eoi_map);
+ return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
static bool pirq_needs_eoi_flag(unsigned irq)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 99d8151..1ffd03b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -722,7 +722,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
if (use_ptemod)
- vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP;
+ vma->vm_flags |= VM_DONTCOPY;
vma->vm_private_data = map;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b4d4eac..f100ce2 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1029,6 +1029,7 @@ int gnttab_init(void)
int i;
unsigned int max_nr_glist_frames, nr_glist_frames;
unsigned int nr_init_grefs;
+ int ret;
nr_grant_frames = 1;
boot_max_nr_grant_frames = __max_nr_grant_frames();
@@ -1047,12 +1048,16 @@ int gnttab_init(void)
nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
for (i = 0; i < nr_glist_frames; i++) {
gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
- if (gnttab_list[i] == NULL)
+ if (gnttab_list[i] == NULL) {
+ ret = -ENOMEM;
goto ini_nomem;
+ }
}
- if (gnttab_resume() < 0)
- return -ENODEV;
+ if (gnttab_resume() < 0) {
+ ret = -ENODEV;
+ goto ini_nomem;
+ }
nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
@@ -1070,7 +1075,7 @@ int gnttab_init(void)
for (i--; i >= 0; i--)
free_page((unsigned long)gnttab_list[i]);
kfree(gnttab_list);
- return -ENOMEM;
+ return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 9e14ae6..412b96c 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -132,6 +132,7 @@ static void do_suspend(void)
err = dpm_suspend_end(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "dpm_suspend_end failed: %d\n", err);
+ si.cancelled = 0;
goto out_resume;
}
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 174b565..0b48579 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -128,7 +128,10 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
pr_debug(" C%d: %s %d uS\n",
cx->type, cx->desc, (u32)cx->latency);
}
- } else
+ } else if (ret != -EINVAL)
+ /* EINVAL means the ACPI ID is incorrect - meaning the ACPI
+ * table is referencing a non-existing CPU - which can happen
+ * with broken ACPI tables. */
pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n",
ret, _pr->acpi_id);
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index f20c5f1..a31b54d 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -135,7 +135,7 @@ static int read_backend_details(struct xenbus_device *xendev)
return xenbus_read_otherend_details(xendev, "backend-id", "backend");
}
-static int is_device_connecting(struct device *dev, void *data)
+static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential)
{
struct xenbus_device *xendev = to_xenbus_device(dev);
struct device_driver *drv = data;
@@ -152,16 +152,41 @@ static int is_device_connecting(struct device *dev, void *data)
if (drv && (dev->driver != drv))
return 0;
+ if (ignore_nonessential) {
+ /* With older QEMU, for PVonHVM guests the guest config files
+ * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0']
+ * which is nonsensical as there is no PV FB (there can be
+ * a PVKB) running as HVM guest. */
+
+ if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0))
+ return 0;
+
+ if ((strncmp(xendev->nodename, "device/vfb", 10) == 0))
+ return 0;
+ }
xendrv = to_xenbus_driver(dev->driver);
return (xendev->state < XenbusStateConnected ||
(xendev->state == XenbusStateConnected &&
xendrv->is_ready && !xendrv->is_ready(xendev)));
}
+static int essential_device_connecting(struct device *dev, void *data)
+{
+ return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */);
+}
+static int non_essential_device_connecting(struct device *dev, void *data)
+{
+ return is_device_connecting(dev, data, false);
+}
-static int exists_connecting_device(struct device_driver *drv)
+static int exists_essential_connecting_device(struct device_driver *drv)
{
return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
- is_device_connecting);
+ essential_device_connecting);
+}
+static int exists_non_essential_connecting_device(struct device_driver *drv)
+{
+ return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+ non_essential_device_connecting);
}
static int print_device_status(struct device *dev, void *data)
@@ -192,6 +217,23 @@ static int print_device_status(struct device *dev, void *data)
/* We only wait for device setup after most initcalls have run. */
static int ready_to_wait_for_devices;
+static bool wait_loop(unsigned long start, unsigned int max_delay,
+ unsigned int *seconds_waited)
+{
+ if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) {
+ if (!*seconds_waited)
+ printk(KERN_WARNING "XENBUS: Waiting for "
+ "devices to initialise: ");
+ *seconds_waited += 5;
+ printk("%us...", max_delay - *seconds_waited);
+ if (*seconds_waited == max_delay)
+ return true;
+ }
+
+ schedule_timeout_interruptible(HZ/10);
+
+ return false;
+}
/*
* On a 5-minute timeout, wait for all devices currently configured. We need
* to do this to guarantee that the filesystems and / or network devices
@@ -215,19 +257,14 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
if (!ready_to_wait_for_devices || !xen_domain())
return;
- while (exists_connecting_device(drv)) {
- if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
- if (!seconds_waited)
- printk(KERN_WARNING "XENBUS: Waiting for "
- "devices to initialise: ");
- seconds_waited += 5;
- printk("%us...", 300 - seconds_waited);
- if (seconds_waited == 300)
- break;
- }
-
- schedule_timeout_interruptible(HZ/10);
- }
+ while (exists_non_essential_connecting_device(drv))
+ if (wait_loop(start, 30, &seconds_waited))
+ break;
+
+ /* Skips PVKB and PVFB check.*/
+ while (exists_essential_connecting_device(drv))
+ if (wait_loop(start, 270, &seconds_waited))
+ break;
if (seconds_waited)
printk("\n");