Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 1
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 7
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 17
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 42
-rw-r--r--  drivers/acpi/bus.c | 23
-rw-r--r--  drivers/acpi/debugfs.c | 20
-rw-r--r--  drivers/acpi/numa.c | 9
-rw-r--r--  drivers/acpi/osl.c | 6
-rw-r--r--  drivers/acpi/sleep.c | 5
-rw-r--r--  drivers/ata/Kconfig | 18
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 8
-rw-r--r--  drivers/ata/ahci.h | 6
-rw-r--r--  drivers/ata/ata_generic.c | 2
-rw-r--r--  drivers/ata/ata_piix.c | 2
-rw-r--r--  drivers/ata/libata-acpi.c | 3
-rw-r--r--  drivers/ata/libata-core.c | 54
-rw-r--r--  drivers/ata/libata-eh.c | 60
-rw-r--r--  drivers/ata/libata-scsi.c | 15
-rw-r--r--  drivers/ata/libata-sff.c | 17
-rw-r--r--  drivers/ata/libata.h | 1
-rw-r--r--  drivers/ata/pata_acpi.c | 2
-rw-r--r--  drivers/ata/pata_arasan_cf.c | 983
-rw-r--r--  drivers/ata/pata_at32.c | 2
-rw-r--r--  drivers/ata/pata_bf54x.c | 4
-rw-r--r--  drivers/ata/pata_hpt366.c | 7
-rw-r--r--  drivers/ata/pata_hpt37x.c | 23
-rw-r--r--  drivers/ata/pata_hpt3x2n.c | 13
-rw-r--r--  drivers/ata/pata_hpt3x3.c | 2
-rw-r--r--  drivers/ata/pata_it821x.c | 4
-rw-r--r--  drivers/ata/pata_ixp4xx_cf.c | 2
-rw-r--r--  drivers/ata/pata_macio.c | 3
-rw-r--r--  drivers/ata/pata_marvell.c | 2
-rw-r--r--  drivers/ata/pata_ninja32.c | 2
-rw-r--r--  drivers/ata/pata_octeon_cf.c | 3
-rw-r--r--  drivers/ata/pata_palmld.c | 2
-rw-r--r--  drivers/ata/pata_pcmcia.c | 2
-rw-r--r--  drivers/ata/pata_pdc2027x.c | 6
-rw-r--r--  drivers/ata/pata_pxa.c | 1
-rw-r--r--  drivers/ata/pata_rb532_cf.c | 1
-rw-r--r--  drivers/ata/pata_samsung_cf.c | 1
-rw-r--r--  drivers/ata/pata_scc.c | 2
-rw-r--r--  drivers/ata/pata_sis.c | 2
-rw-r--r--  drivers/ata/pdc_adma.c | 4
-rw-r--r--  drivers/ata/sata_dwc_460ex.c | 75
-rw-r--r--  drivers/ata/sata_fsl.c | 22
-rw-r--r--  drivers/ata/sata_mv.c | 3
-rw-r--r--  drivers/ata/sata_nv.c | 14
-rw-r--r--  drivers/ata/sata_promise.c | 4
-rw-r--r--  drivers/ata/sata_qstor.c | 3
-rw-r--r--  drivers/ata/sata_sil.c | 3
-rw-r--r--  drivers/ata/sata_sil24.c | 3
-rw-r--r--  drivers/ata/sata_sis.c | 2
-rw-r--r--  drivers/ata/sata_svw.c | 12
-rw-r--r--  drivers/ata/sata_sx4.c | 5
-rw-r--r--  drivers/ata/sata_uli.c | 3
-rw-r--r--  drivers/ata/sata_via.c | 9
-rw-r--r--  drivers/ata/sata_vsc.c | 3
-rw-r--r--  drivers/atm/solos-pci.c | 5
-rw-r--r--  drivers/base/Makefile | 2
-rw-r--r--  drivers/base/power/Makefile | 3
-rw-r--r--  drivers/base/power/main.c | 175
-rw-r--r--  drivers/base/power/opp.c | 2
-rw-r--r--  drivers/base/power/power.h | 21
-rw-r--r--  drivers/base/power/runtime.c | 37
-rw-r--r--  drivers/base/power/sysfs.c | 78
-rw-r--r--  drivers/base/power/trace.c | 6
-rw-r--r--  drivers/base/power/wakeup.c | 109
-rw-r--r--  drivers/base/syscore.c | 117
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/block/loop.c | 5
-rw-r--r--  drivers/block/xen-blkfront.c | 87
-rw-r--r--  drivers/bluetooth/ath3k.c | 5
-rw-r--r--  drivers/bluetooth/btusb.c | 12
-rw-r--r--  drivers/char/agp/amd64-agp.c | 9
-rw-r--r--  drivers/char/agp/intel-agp.h | 1
-rw-r--r--  drivers/char/agp/intel-gtt.c | 56
-rw-r--r--  drivers/char/hw_random/Kconfig | 12
-rw-r--r--  drivers/char/hw_random/Makefile | 1
-rw-r--r--  drivers/char/hw_random/omap-rng.c | 14
-rw-r--r--  drivers/char/hw_random/picoxcell-rng.c | 208
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 8
-rw-r--r--  drivers/char/mmtimer.c | 30
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 3
-rw-r--r--  drivers/char/pcmcia/ipwireless/main.c | 52
-rw-r--r--  drivers/char/random.c | 13
-rw-r--r--  drivers/char/tpm/tpm.c | 28
-rw-r--r--  drivers/char/tpm/tpm.h | 2
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 4
-rw-r--r--  drivers/char/virtio_console.c | 8
-rw-r--r--  drivers/cpufreq/cpufreq.c | 27
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 22
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 20
-rw-r--r--  drivers/crypto/Kconfig | 17
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/omap-aes.c | 4
-rw-r--r--  drivers/crypto/omap-sham.c | 4
-rw-r--r--  drivers/crypto/picoxcell_crypto.c | 1867
-rw-r--r--  drivers/crypto/picoxcell_crypto_regs.h | 128
-rw-r--r--  drivers/gpio/ml_ioh_gpio.c | 1
-rw-r--r--  drivers/gpio/pch_gpio.c | 1
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 21
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 103
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_vm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 3
-rw-r--r--  drivers/hwmon/Kconfig | 19
-rw-r--r--  drivers/hwmon/ad7414.c | 1
-rw-r--r--  drivers/hwmon/adt7411.c | 1
-rw-r--r--  drivers/hwmon/f71882fg.c | 4
-rw-r--r--  drivers/hwmon/jc42.c | 35
-rw-r--r--  drivers/hwmon/k10temp.c | 5
-rw-r--r--  drivers/hwmon/lm85.c | 23
-rw-r--r--  drivers/i2c/busses/i2c-eg20t.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-ocores.c | 16
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 39
-rw-r--r--  drivers/i2c/busses/i2c-stu300.c | 2
-rw-r--r--  drivers/i2c/i2c-core.c | 2
-rw-r--r--  drivers/idle/intel_idle.c | 24
-rw-r--r--  drivers/infiniband/core/cm.c | 20
-rw-r--r--  drivers/infiniband/core/cma.c | 58
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 24
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c | 1
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 32
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 13
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 12
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.h | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c | 5
-rw-r--r--  drivers/input/gameport/gameport.c | 2
-rw-r--r--  drivers/input/keyboard/tegra-kbc.c | 62
-rw-r--r--  drivers/input/mouse/synaptics.h | 23
-rw-r--r--  drivers/input/serio/serio.c | 2
-rw-r--r--  drivers/input/touchscreen/tps6507x-ts.c | 12
-rw-r--r--  drivers/isdn/hardware/eicon/istream.c | 2
-rw-r--r--  drivers/isdn/hisax/isdnl2.c | 28
-rw-r--r--  drivers/md/linear.c | 1
-rw-r--r--  drivers/md/md.c | 33
-rw-r--r--  drivers/md/md.h | 2
-rw-r--r--  drivers/md/multipath.c | 1
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/md/raid1.c | 6
-rw-r--r--  drivers/md/raid10.c | 7
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/media/common/tuners/tda8290.c | 14
-rw-r--r--  drivers/media/dvb/dvb-usb/dib0700_devices.c | 21
-rw-r--r--  drivers/media/dvb/dvb-usb/lmedm04.c | 6
-rw-r--r--  drivers/media/dvb/frontends/dib7000m.c | 19
-rw-r--r--  drivers/media/dvb/frontends/dib7000m.h | 15
-rw-r--r--  drivers/media/dvb/mantis/mantis_pci.c | 1
-rw-r--r--  drivers/media/rc/ir-raw.c | 3
-rw-r--r--  drivers/media/rc/mceusb.c | 27
-rw-r--r--  drivers/media/rc/nuvoton-cir.c | 5
-rw-r--r--  drivers/media/rc/nuvoton-cir.h | 7
-rw-r--r--  drivers/media/rc/rc-main.c | 2
-rw-r--r--  drivers/media/video/au0828/au0828-video.c | 28
-rw-r--r--  drivers/media/video/cx18/cx18-cards.c | 50
-rw-r--r--  drivers/media/video/cx18/cx18-driver.c | 25
-rw-r--r--  drivers/media/video/cx18/cx18-driver.h | 3
-rw-r--r--  drivers/media/video/cx18/cx18-dvb.c | 38
-rw-r--r--  drivers/media/video/cx23885/cx23885-i2c.c | 10
-rw-r--r--  drivers/media/video/cx25840/cx25840-core.c | 3
-rw-r--r--  drivers/media/video/ivtv/ivtv-irq.c | 58
-rw-r--r--  drivers/media/video/mem2mem_testdev.c | 1
-rw-r--r--  drivers/media/video/s2255drv.c | 10
-rw-r--r--  drivers/memstick/core/memstick.c | 2
-rw-r--r--  drivers/message/fusion/mptbase.h | 4
-rw-r--r--  drivers/message/fusion/mptctl.c | 8
-rw-r--r--  drivers/message/fusion/mptscsih.c | 7
-rw-r--r--  drivers/message/i2o/driver.c | 3
-rw-r--r--  drivers/mfd/asic3.c | 4
-rw-r--r--  drivers/mfd/davinci_voicecodec.c | 4
-rw-r--r--  drivers/mfd/tps6586x.c | 10
-rw-r--r--  drivers/mfd/ucb1x00-ts.c | 12
-rw-r--r--  drivers/mfd/wm8994-core.c | 18
-rw-r--r--  drivers/misc/bmp085.c | 1
-rw-r--r--  drivers/misc/iwmc3200top/iwmc3200top.h | 4
-rw-r--r--  drivers/misc/iwmc3200top/main.c | 14
-rw-r--r--  drivers/misc/tifm_core.c | 2
-rw-r--r--  drivers/misc/vmw_balloon.c | 2
-rw-r--r--  drivers/mmc/core/core.c | 2
-rw-r--r--  drivers/mmc/core/sdio.c | 3
-rw-r--r--  drivers/mmc/host/mmc_spi.c | 4
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 43
-rw-r--r--  drivers/mtd/chips/jedec_probe.c | 35
-rw-r--r--  drivers/mtd/maps/amd76xrom.c | 1
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 1
-rw-r--r--  drivers/mtd/nand/omap2.c | 2
-rw-r--r--  drivers/mtd/nand/r852.c | 2
-rw-r--r--  drivers/mtd/onenand/generic.c | 2
-rw-r--r--  drivers/mtd/onenand/omap2.c | 2
-rw-r--r--  drivers/mtd/sm_ftl.c | 2
-rw-r--r--  drivers/net/ariadne.c | 5
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 31
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 87
-rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.h | 29
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethtool.c | 39
-rw-r--r--  drivers/net/bnx2x/bnx2x_init.h | 2
-rw-r--r--  drivers/net/bnx2x/bnx2x_main.c | 37
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 4
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 32
-rw-r--r--  drivers/net/bonding/bond_3ad.h | 3
-rw-r--r--  drivers/net/can/mcp251x.c | 2
-rw-r--r--  drivers/net/can/softing/Kconfig | 2
-rw-r--r--  drivers/net/can/softing/softing_main.c | 1
-rw-r--r--  drivers/net/cnic.c | 33
-rw-r--r--  drivers/net/cxgb4/t4_msg.h | 1
-rw-r--r--  drivers/net/cxgb4vf/cxgb4vf_main.c | 80
-rw-r--r--  drivers/net/cxgb4vf/t4vf_hw.c | 2
-rw-r--r--  drivers/net/davinci_emac.c | 2
-rw-r--r--  drivers/net/dm9000.c | 9
-rw-r--r--  drivers/net/dnet.c | 3
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 3
-rw-r--r--  drivers/net/e1000e/netdev.c | 63
-rw-r--r--  drivers/net/ethoc.c | 8
-rw-r--r--  drivers/net/fec.c | 3
-rw-r--r--  drivers/net/forcedeth.c | 2
-rw-r--r--  drivers/net/igbvf/vf.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c | 51
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/macb.c | 2
-rw-r--r--  drivers/net/macvtap.c | 3
-rw-r--r--  drivers/net/pch_gbe/pch_gbe.h | 2
-rw-r--r--  drivers/net/pch_gbe/pch_gbe_main.c | 106
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 1
-rw-r--r--  drivers/net/r6040.c | 115
-rw-r--r--  drivers/net/r8169.c | 50
-rw-r--r--  drivers/net/sfc/ethtool.c | 22
-rw-r--r--  drivers/net/skge.c | 3
-rw-r--r--  drivers/net/smsc911x.c | 5
-rw-r--r--  drivers/net/stmmac/stmmac_main.c | 4
-rw-r--r--  drivers/net/tg3.c | 8
-rw-r--r--  drivers/net/usb/dm9601.c | 4
-rw-r--r--  drivers/net/usb/hso.c | 12
-rw-r--r--  drivers/net/usb/usbnet.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c | 143
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/mac.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 8
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 2
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.c | 70
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2100.h | 1
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.c | 196
-rw-r--r--  drivers/net/wireless/ipw2x00/ipw2200.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 67
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 2
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 14
-rw-r--r--  drivers/net/wireless/p54/p54usb.c | 1
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 3
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 6
-rw-r--r--  drivers/nfc/Kconfig | 2
-rw-r--r--  drivers/nfc/pn544.c | 4
-rw-r--r--  drivers/of/Kconfig | 6
-rw-r--r--  drivers/of/Makefile | 1
-rw-r--r--  drivers/of/of_pci.c | 92
-rw-r--r--  drivers/of/pdt.c | 112
-rw-r--r--  drivers/pci/pci-driver.c | 4
-rw-r--r--  drivers/pci/xen-pcifront.c | 31
-rw-r--r--  drivers/pcmcia/pcmcia_resource.c | 2
-rw-r--r--  drivers/pcmcia/pxa2xx_base.c | 2
-rw-r--r--  drivers/pcmcia/pxa2xx_base.h | 1
-rw-r--r--  drivers/pcmcia/pxa2xx_colibri.c | 3
-rw-r--r--  drivers/pcmcia/pxa2xx_lubbock.c | 1
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 4
-rw-r--r--  drivers/platform/x86/asus_acpi.c | 8
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 24
-rw-r--r--  drivers/platform/x86/intel_pmic_gpio.c | 116
-rw-r--r--  drivers/platform/x86/tc1100-wmi.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 8
-rw-r--r--  drivers/pps/generators/Kconfig | 2
-rw-r--r--  drivers/pps/kapi.c | 2
-rw-r--r--  drivers/rapidio/rio-sysfs.c | 12
-rw-r--r--  drivers/regulator/mc13xxx-regulator-core.c | 2
-rw-r--r--  drivers/regulator/wm831x-dcdc.c | 1
-rw-r--r--  drivers/rtc/Kconfig | 12
-rw-r--r--  drivers/rtc/class.c | 7
-rw-r--r--  drivers/rtc/interface.c | 203
-rw-r--r--  drivers/rtc/rtc-at91rm9200.c | 28
-rw-r--r--  drivers/rtc/rtc-at91sam9.c | 30
-rw-r--r--  drivers/rtc/rtc-bfin.c | 27
-rw-r--r--  drivers/rtc/rtc-cmos.c | 111
-rw-r--r--  drivers/rtc/rtc-davinci.c | 55
-rw-r--r--  drivers/rtc/rtc-dev.c | 104
-rw-r--r--  drivers/rtc/rtc-ds1511.c | 17
-rw-r--r--  drivers/rtc/rtc-ds1553.c | 17
-rw-r--r--  drivers/rtc/rtc-ds3232.c | 32
-rw-r--r--  drivers/rtc/rtc-jz4740.c | 7
-rw-r--r--  drivers/rtc/rtc-mc13xxx.c | 7
-rw-r--r--  drivers/rtc/rtc-mpc5121.c | 20
-rw-r--r--  drivers/rtc/rtc-mrst.c | 33
-rw-r--r--  drivers/rtc/rtc-mxc.c | 7
-rw-r--r--  drivers/rtc/rtc-nuc900.c | 15
-rw-r--r--  drivers/rtc/rtc-omap.c | 39
-rw-r--r--  drivers/rtc/rtc-pcap.c | 6
-rw-r--r--  drivers/rtc/rtc-pcf50633.c | 22
-rw-r--r--  drivers/rtc/rtc-pl030.c | 6
-rw-r--r--  drivers/rtc/rtc-pl031.c | 55
-rw-r--r--  drivers/rtc/rtc-proc.c | 8
-rw-r--r--  drivers/rtc/rtc-pxa.c | 44
-rw-r--r--  drivers/rtc/rtc-rs5c372.c | 52
-rw-r--r--  drivers/rtc/rtc-rx8025.c | 25
-rw-r--r--  drivers/rtc/rtc-s3c.c | 41
-rw-r--r--  drivers/rtc/rtc-sa1100.c | 160
-rw-r--r--  drivers/rtc/rtc-sh.c | 24
-rw-r--r--  drivers/rtc/rtc-stmp3xxx.c | 15
-rw-r--r--  drivers/rtc/rtc-test.c | 13
-rw-r--r--  drivers/rtc/rtc-twl.c | 13
-rw-r--r--  drivers/rtc/rtc-vr41xx.c | 32
-rw-r--r--  drivers/rtc/rtc-wm831x.c | 16
-rw-r--r--  drivers/rtc/rtc-wm8350.c | 21
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 2
-rw-r--r--  drivers/s390/block/xpram.c | 4
-rw-r--r--  drivers/s390/char/keyboard.c | 3
-rw-r--r--  drivers/s390/char/tape.h | 8
-rw-r--r--  drivers/s390/char/tape_34xx.c | 59
-rw-r--r--  drivers/s390/char/tape_3590.c | 83
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 9
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 94
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 10
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 12
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_priv.h | 2
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 2
-rw-r--r--  drivers/scsi/scsi_tgt_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 2
-rw-r--r--  drivers/spi/pxa2xx_spi_pci.c | 63
-rw-r--r--  drivers/spi/xilinx_spi.c | 6
-rw-r--r--  drivers/target/Makefile | 3
-rw-r--r--  drivers/target/target_core_configfs.c | 155
-rw-r--r--  drivers/target/target_core_device.c | 13
-rw-r--r--  drivers/target/target_core_fabric_configfs.c | 92
-rw-r--r--  drivers/target/target_core_iblock.c | 8
-rw-r--r--  drivers/target/target_core_mib.c | 1078
-rw-r--r--  drivers/target/target_core_mib.h | 28
-rw-r--r--  drivers/target/target_core_pscsi.c | 4
-rw-r--r--  drivers/target/target_core_tmr.c | 5
-rw-r--r--  drivers/target/target_core_tpg.c | 29
-rw-r--r--  drivers/target/target_core_transport.c | 56
-rw-r--r--  drivers/thermal/Kconfig | 1
-rw-r--r--  drivers/thermal/thermal_sys.c | 40
-rw-r--r--  drivers/tty/serial/max3100.c | 2
-rw-r--r--  drivers/tty/serial/max3107.c | 2
-rw-r--r--  drivers/tty/serial/serial_cs.c | 1
-rw-r--r--  drivers/usb/core/hcd-pci.c | 4
-rw-r--r--  drivers/usb/core/hub.c | 28
-rw-r--r--  drivers/usb/core/quirks.c | 8
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 1
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 9
-rw-r--r--  drivers/usb/host/xhci-mem.c | 10
-rw-r--r--  drivers/usb/host/xhci-ring.c | 40
-rw-r--r--  drivers/usb/host/xhci.c | 14
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 1
-rw-r--r--  drivers/usb/musb/musb_core.h | 17
-rw-r--r--  drivers/usb/musb/omap2430.c | 1
-rw-r--r--  drivers/usb/serial/sierra.c | 3
-rw-r--r--  drivers/usb/serial/usb_wwan.c | 15
-rw-r--r--  drivers/usb/serial/visor.c | 12
-rw-r--r--  drivers/video/backlight/ltv350qv.c | 9
-rw-r--r--  drivers/watchdog/cpwd.c | 2
-rw-r--r--  drivers/watchdog/hpwdt.c | 4
-rw-r--r--  drivers/watchdog/sbc_fitpc2_wdt.c | 7
-rw-r--r--  drivers/watchdog/sch311x_wdt.c | 2
-rw-r--r--  drivers/watchdog/w83697ug_wdt.c | 2
-rw-r--r--  drivers/xen/balloon.c | 16
-rw-r--r--  drivers/xen/events.c | 342
-rw-r--r--  drivers/xen/manage.c | 143
-rw-r--r--  drivers/xen/platform-pci.c | 3
421 files changed, 7669 insertions, 4430 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 2aa042a..3a17ca5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -7,7 +7,6 @@ menuconfig ACPI
depends on !IA64_HP_SIM
depends on IA64 || X86
depends on PCI
- depends on PM
select PNP
default y
help
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 54784bb..edc2586 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
u8 originally_enabled; /* True if GPE was originally enabled */
};
+struct acpi_gpe_notify_object {
+ struct acpi_namespace_node *node;
+ struct acpi_gpe_notify_object *next;
+};
+
union acpi_gpe_dispatch_info {
struct acpi_namespace_node *method_node; /* Method node for this GPE level */
struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
- struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */
+ struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */
};
/*
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 14988a8..f472521 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
acpi_status status;
struct acpi_gpe_event_info *local_gpe_event_info;
struct acpi_evaluate_info *info;
+ struct acpi_gpe_notify_object *notify_object;
ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
* from this thread -- because handlers may in turn run other
* control methods.
*/
- status =
- acpi_ev_queue_notify_request(local_gpe_event_info->dispatch.
- device_node,
- ACPI_NOTIFY_DEVICE_WAKE);
+ status = acpi_ev_queue_notify_request(
+ local_gpe_event_info->dispatch.device.node,
+ ACPI_NOTIFY_DEVICE_WAKE);
+
+ notify_object = local_gpe_event_info->dispatch.device.next;
+ while (ACPI_SUCCESS(status) && notify_object) {
+ status = acpi_ev_queue_notify_request(
+ notify_object->node,
+ ACPI_NOTIFY_DEVICE_WAKE);
+ notify_object = notify_object->next;
+ }
+
break;
case ACPI_GPE_DISPATCH_METHOD:
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 3b20a34..52aaff3 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
acpi_status status = AE_BAD_PARAMETER;
struct acpi_gpe_event_info *gpe_event_info;
struct acpi_namespace_node *device_node;
+ struct acpi_gpe_notify_object *notify_object;
acpi_cpu_flags flags;
+ u8 gpe_dispatch_mask;
ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
@@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
goto unlock_and_exit;
}
+ if (wake_device == ACPI_ROOT_OBJECT) {
+ goto out;
+ }
+
/*
* If there is no method or handler for this GPE, then the
* wake_device will be notified whenever this GPE fires (aka
* "implicit notify") Note: The GPE is assumed to be
* level-triggered (for windows compatibility).
*/
- if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
- ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
+ gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
+ if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
+ && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
+ goto out;
+ }
- /* Validate wake_device is of type Device */
+ /* Validate wake_device is of type Device */
- device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
- wake_device);
- if (device_node->type != ACPI_TYPE_DEVICE) {
- goto unlock_and_exit;
- }
+ device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
+ if (device_node->type != ACPI_TYPE_DEVICE) {
+ goto unlock_and_exit;
+ }
+
+ if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
ACPI_GPE_LEVEL_TRIGGERED);
- gpe_event_info->dispatch.device_node = device_node;
+ gpe_event_info->dispatch.device.node = device_node;
+ gpe_event_info->dispatch.device.next = NULL;
+ } else {
+ /* There are multiple devices to notify implicitly. */
+
+ notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
+ if (!notify_object) {
+ status = AE_NO_MEMORY;
+ goto unlock_and_exit;
+ }
+
+ notify_object->node = device_node;
+ notify_object->next = gpe_event_info->dispatch.device.next;
+ gpe_event_info->dispatch.device.next = notify_object;
}
+ out:
gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
status = AE_OK;
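
[Editor's note] The three ACPICA hunks above (aclocal.h, evgpe.c, evxfgpe.c) replace the single implicit-notify device pointer with a singly linked list, so one wake GPE can notify every _PRW device that claims it: acpi_setup_gpe_for_wake() prepends additional devices, and acpi_ev_asynch_execute_gpe_method() walks the chain. A minimal sketch of that list discipline, using simplified stand-in types (the real code uses struct acpi_namespace_node and ACPI_ALLOCATE_ZEROED):

/* Illustrative sketch only -- simplified stand-ins for the ACPICA types. */
#include <stdlib.h>

struct notify_object {
        void *node;                     /* stands in for struct acpi_namespace_node * */
        struct notify_object *next;
};

struct dispatch_info {
        struct notify_object device;    /* first _PRW device, embedded in the union */
};

/* acpi_setup_gpe_for_wake(): each additional wake device is pushed onto the
 * head of the list hanging off the embedded first entry. */
int add_wake_device(struct dispatch_info *di, void *node)
{
        struct notify_object *obj = calloc(1, sizeof(*obj));

        if (!obj)
                return -1;              /* AE_NO_MEMORY in the real code */
        obj->node = node;
        obj->next = di->device.next;
        di->device.next = obj;
        return 0;
}

/* acpi_ev_asynch_execute_gpe_method(): the embedded first entry is notified,
 * then the chained entries are walked in turn. */
void notify_all_wake_devices(struct dispatch_info *di, void (*notify)(void *))
{
        struct notify_object *obj;

        notify(di->device.node);
        for (obj = di->device.next; obj; obj = obj->next)
                notify(obj->node);
}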
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 7ced61f..9749980 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -40,6 +40,7 @@
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <linux/dmi.h>
+#include <linux/suspend.h>
#include "internal.h"
@@ -1006,8 +1007,7 @@ struct kobject *acpi_kobj;
static int __init acpi_init(void)
{
- int result = 0;
-
+ int result;
if (acpi_disabled) {
printk(KERN_INFO PREFIX "Interpreter disabled.\n");
@@ -1022,29 +1022,18 @@ static int __init acpi_init(void)
init_acpi_device_notify();
result = acpi_bus_init();
-
- if (!result) {
- pci_mmcfg_late_init();
- if (!(pm_flags & PM_APM))
- pm_flags |= PM_ACPI;
- else {
- printk(KERN_INFO PREFIX
- "APM is already active, exiting\n");
- disable_acpi();
- result = -ENODEV;
- }
- } else
+ if (result) {
disable_acpi();
-
- if (acpi_disabled)
return result;
+ }
+ pci_mmcfg_late_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
acpi_sleep_proc_init();
acpi_wakeup_device_init();
- return result;
+ return 0;
}
subsys_initcall(acpi_init);
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 5df67f1..384f7ab 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
size_t count, loff_t *ppos)
{
static char *buf;
- static int uncopied_bytes;
+ static u32 max_size;
+ static u32 uncopied_bytes;
+
struct acpi_table_header table;
acpi_status status;
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (copy_from_user(&table, user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
- uncopied_bytes = table.length;
- buf = kzalloc(uncopied_bytes, GFP_KERNEL);
+ uncopied_bytes = max_size = table.length;
+ buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
}
- if (uncopied_bytes < count) {
- kfree(buf);
+ if (buf == NULL)
+ return -EINVAL;
+
+ if ((*ppos > max_size) ||
+ (*ppos + count > max_size) ||
+ (*ppos + count < count) ||
+ (count > uncopied_bytes))
return -EINVAL;
- }
if (copy_from_user(buf + (*ppos), user_buf, count)) {
kfree(buf);
+ buf = NULL;
return -EFAULT;
}
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
if (!uncopied_bytes) {
status = acpi_install_method(buf);
kfree(buf);
+ buf = NULL;
if (ACPI_FAILURE(status))
return -EINVAL;
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
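
[Editor's note] The rewritten test in cm_write() above rejects chunks that start beyond the table, run past it, wrap the offset arithmetic, or exceed the bytes still expected, and it keeps the static buffer pointer consistent by NULLing it after every kfree(). A standalone sketch of the same bounds check (names mirror the hunk; the kernel code uses loff_t, size_t and u32 fields):

#include <stdbool.h>
#include <stddef.h>

/*
 * max_size is the table length captured on the first write;
 * uncopied_bytes counts down as chunks arrive.
 */
bool cm_chunk_is_valid(size_t pos, size_t count,
                       size_t max_size, size_t uncopied_bytes)
{
        if (pos > max_size)             /* starts beyond the table */
                return false;
        if (pos + count > max_size)     /* runs beyond the table */
                return false;
        if (pos + count < count)        /* offset arithmetic wrapped */
                return false;
        if (count > uncopied_bytes)     /* more data than is still expected */
                return false;
        return true;
}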
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 5eb25eb..3b5c318 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -274,7 +274,7 @@ acpi_table_parse_srat(enum acpi_srat_type id,
int __init acpi_numa_init(void)
{
- int ret = 0;
+ int cnt = 0;
/*
* Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
@@ -288,7 +288,7 @@ int __init acpi_numa_init(void)
acpi_parse_x2apic_affinity, 0);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
acpi_parse_processor_affinity, 0);
- ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
}
@@ -297,7 +297,10 @@ int __init acpi_numa_init(void)
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
acpi_numa_arch_fixup();
- return ret;
+
+ if (cnt <= 0)
+ return cnt ?: -ENOENT;
+ return 0;
}
int acpi_get_pxm(acpi_handle h)
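
[Editor's note] The new return logic in acpi_numa_init() uses the GNU shorthand cnt ?: -ENOENT, which evaluates to cnt if it is non-zero and to -ENOENT otherwise. An equivalent, extension-free expansion for readers unfamiliar with the idiom:

#include <errno.h>

/* Equivalent expansion of the return statement in the hunk above. */
int acpi_numa_init_result(int cnt)
{
        if (cnt > 0)
                return 0;       /* at least one memory affinity entry parsed */
        if (cnt < 0)
                return cnt;     /* propagate the SRAT parse error */
        return -ENOENT;         /* SRAT present but no usable entries */
}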
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c90c76a..4a67530 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1589,9 +1589,9 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
- kacpid_wq = create_workqueue("kacpid");
- kacpi_notify_wq = create_workqueue("kacpi_notify");
- kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
+ kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index d6a8cd1..1850dac 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
+#include <linux/acpi.h>
#include <asm/io.h>
@@ -585,7 +586,7 @@ int acpi_suspend(u32 acpi_state)
return -EINVAL;
}
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
/**
* acpi_pm_device_sleep_state - return preferred power state of ACPI device
* in the system sleep state given by %acpi_target_sleep_state
@@ -671,7 +672,7 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
*d_min_p = d_min;
return d_max;
}
-#endif /* CONFIG_PM_OPS */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c2328ae..75afa75 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -202,6 +202,18 @@ config SATA_DWC
If unsure, say N.
+config SATA_DWC_DEBUG
+ bool "Debugging driver version"
+ depends on SATA_DWC
+ help
+ This option enables debugging output in the driver.
+
+config SATA_DWC_VDEBUG
+ bool "Verbose debug output"
+ depends on SATA_DWC_DEBUG
+ help
+ This option enables the taskfile dumping and NCQ debugging.
+
config SATA_MV
tristate "Marvell SATA support"
help
@@ -299,6 +311,12 @@ config PATA_AMD
If unsure, say N.
+config PATA_ARASAN_CF
+ tristate "ARASAN CompactFlash PATA Controller Support"
+ select DMA_ENGINE
+ help
+ Say Y here to support the ARASAN CompactFlash PATA controller
+
config PATA_ARTOP
tristate "ARTOP 6210/6260 PATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 27291aa..8ac64e1 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
# SFF w/ custom DMA
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
+obj-$(CONFIG_PATA_ARASAN_CF) += pata_arasan_cf.o
obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
obj-$(CONFIG_SATA_SX4) += sata_sx4.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8d96ce..e62f693 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -175,8 +175,7 @@ static const struct ata_port_info ahci_port_info[] = {
{
AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
@@ -260,6 +259,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
@@ -383,6 +383,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.class = PCI_CLASS_STORAGE_SATA_AHCI,
.class_mask = 0xffffff,
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
+ { PCI_DEVICE(0x1b4b, 0x9125),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+ { PCI_DEVICE(0x1b4b, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 3e606c3..ccaf081 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -213,10 +213,8 @@ enum {
/* ap->flags bits */
- AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
- ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
- ATA_FLAG_LPM,
+ AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
ICH_MAP = 0x90, /* ICH MAP register */
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 6981f76..721d38b 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -237,7 +237,7 @@ static struct pci_device_id ata_generic[] = {
#endif
/* Intel, IDE class device */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL,
+ PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL,
.driver_data = ATA_GEN_INTEL_IDER },
/* Must come last. If you add entries adjust this table appropriately */
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 6cb14ca..cdec4ab 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -230,7 +230,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x2850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* SATA ports */
-
+
/* 82801EB (ICH5) */
{ 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata },
/* 82801EB (ICH5) */
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 8b5ea39..a791b8ce 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -660,8 +660,7 @@ static int ata_acpi_filter_tf(struct ata_device *dev,
* @dev: target ATA device
* @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7)
*
- * Outputs ATA taskfile to standard ATA host controller using MMIO
- * or PIO as indicated by the ATA_FLAG_MMIO flag.
+ * Outputs ATA taskfile to standard ATA host controller.
* Writes the control, feature, nsect, lbal, lbam, and lbah registers.
* Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
* hob_lbal, hob_lbam, and hob_lbah.
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d4e52e2..b91e19c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4210,7 +4210,7 @@ static int glob_match (const char *text, const char *pattern)
return 0; /* End of both strings: match */
return 1; /* No match */
}
-
+
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -5479,7 +5479,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (!ap)
return NULL;
-
+
ap->pflags |= ATA_PFLAG_INITIALIZING;
ap->lock = &host->lock;
ap->print_id = -1;
@@ -5887,21 +5887,9 @@ void ata_host_init(struct ata_host *host, struct device *dev,
host->ops = ops;
}
-
-static void async_port_probe(void *data, async_cookie_t cookie)
+int ata_port_probe(struct ata_port *ap)
{
- int rc;
- struct ata_port *ap = data;
-
- /*
- * If we're not allowed to scan this host in parallel,
- * we need to wait until all previous scans have completed
- * before going further.
- * Jeff Garzik says this is only within a controller, so we
- * don't need to wait for port 0, only for later ports.
- */
- if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
- async_synchronize_cookie(cookie);
+ int rc = 0;
/* probe */
if (ap->ops->error_handler) {
@@ -5927,23 +5915,33 @@ static void async_port_probe(void *data, async_cookie_t cookie)
DPRINTK("ata%u: bus probe begin\n", ap->print_id);
rc = ata_bus_probe(ap);
DPRINTK("ata%u: bus probe end\n", ap->print_id);
-
- if (rc) {
- /* FIXME: do something useful here?
- * Current libata behavior will
- * tear down everything when
- * the module is removed
- * or the h/w is unplugged.
- */
- }
}
+ return rc;
+}
+
+
+static void async_port_probe(void *data, async_cookie_t cookie)
+{
+ struct ata_port *ap = data;
+
+ /*
+ * If we're not allowed to scan this host in parallel,
+ * we need to wait until all previous scans have completed
+ * before going further.
+ * Jeff Garzik says this is only within a controller, so we
+ * don't need to wait for port 0, only for later ports.
+ */
+ if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
+ async_synchronize_cookie(cookie);
+
+ (void)ata_port_probe(ap);
/* in order to keep device order, we need to synchronize at this point */
async_synchronize_cookie(cookie);
ata_scsi_scan_host(ap, 1);
-
}
+
/**
* ata_host_register - register initialized ATA host
* @host: ATA host to register
@@ -5983,7 +5981,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
for (i = 0; i < host->n_ports; i++)
host->ports[i]->print_id = ata_print_id++;
-
+
/* Create associated sysfs transport objects */
for (i = 0; i < host->n_ports; i++) {
rc = ata_tport_add(host->dev,host->ports[i]);
@@ -6471,7 +6469,7 @@ static int __init ata_init(void)
ata_sff_exit();
rc = -ENOMEM;
goto err_out;
- }
+ }
printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
return 0;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 17a6378..df3f314 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -587,11 +587,43 @@ static void ata_eh_unload(struct ata_port *ap)
void ata_scsi_error(struct Scsi_Host *host)
{
struct ata_port *ap = ata_shost_to_port(host);
- int i;
unsigned long flags;
+ LIST_HEAD(eh_work_q);
DPRINTK("ENTER\n");
+ spin_lock_irqsave(host->host_lock, flags);
+ list_splice_init(&host->eh_cmd_q, &eh_work_q);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ ata_scsi_cmd_error_handler(host, ap, &eh_work_q);
+
+ /* If we timed raced normal completion and there is nothing to
+ recover nr_timedout == 0 why exactly are we doing error recovery ? */
+ ata_scsi_port_error_handler(host, ap);
+
+ /* finish or retry handled scmd's and clean up */
+ WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_scsi_cmd_error_handler - error callback for a list of commands
+ * @host: scsi host containing the port
+ * @ap: ATA port within the host
+ * @eh_work_q: list of commands to process
+ *
+ * process the given list of commands and return those finished to the
+ * ap->eh_done_q. This function is the first part of the libata error
+ * handler which processes a given list of failed commands.
+ */
+void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+ struct list_head *eh_work_q)
+{
+ int i;
+ unsigned long flags;
+
/* make sure sff pio task is not running */
ata_sff_flush_pio_task(ap);
@@ -627,7 +659,7 @@ void ata_scsi_error(struct Scsi_Host *host)
if (ap->ops->lost_interrupt)
ap->ops->lost_interrupt(ap);
- list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
+ list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc;
for (i = 0; i < ATA_MAX_QUEUE; i++) {
@@ -671,8 +703,20 @@ void ata_scsi_error(struct Scsi_Host *host)
} else
spin_unlock_wait(ap->lock);
- /* If we timed raced normal completion and there is nothing to
- recover nr_timedout == 0 why exactly are we doing error recovery ? */
+}
+EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
+
+/**
+ * ata_scsi_port_error_handler - recover the port after the commands
+ * @host: SCSI host containing the port
+ * @ap: the ATA port
+ *
+ * Handle the recovery of the port @ap after all the commands
+ * have been recovered.
+ */
+void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
+{
+ unsigned long flags;
/* invoke error handler */
if (ap->ops->error_handler) {
@@ -761,9 +805,6 @@ void ata_scsi_error(struct Scsi_Host *host)
ap->ops->eng_timeout(ap);
}
- /* finish or retry handled scmd's and clean up */
- WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
-
scsi_eh_flush_done_q(&ap->eh_done_q);
/* clean up */
@@ -784,9 +825,8 @@ void ata_scsi_error(struct Scsi_Host *host)
wake_up_all(&ap->eh_wait_q);
spin_unlock_irqrestore(ap->lock, flags);
-
- DPRINTK("EXIT\n");
}
+EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
/**
* ata_port_wait_eh - Wait for the currently pending EH to complete
@@ -1618,7 +1658,7 @@ static void ata_eh_analyze_serror(struct ata_link *link)
* host links. For disabled PMP links, only N bit is
* considered as X bit is left at 1 for link plugging.
*/
- if (link->lpm_policy != ATA_LPM_MAX_POWER)
+ if (link->lpm_policy > ATA_LPM_MAX_POWER)
hotplug_mask = 0; /* hotplug doesn't work w/ LPM */
else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
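
[Editor's note] The libata-eh.c change above splits ata_scsi_error() into two exported halves, ata_scsi_cmd_error_handler() and ata_scsi_port_error_handler(), so SAS LLDDs can interleave their own recovery between the per-command and per-port phases (the libsas files in the diffstat are the in-tree user). A hedged sketch of how an external error-handling path might call the two halves; my_sas_eh() and its surroundings are hypothetical:

#include <scsi/scsi_host.h>
#include <linux/libata.h>

void my_sas_eh(struct Scsi_Host *shost, struct ata_port *ap)
{
        unsigned long flags;
        LIST_HEAD(eh_work_q);

        /* Detach the failed commands, as ata_scsi_error() now does. */
        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->eh_cmd_q, &eh_work_q);
        spin_unlock_irqrestore(shost->host_lock, flags);

        /* First half: fail or retry the individual commands on this port. */
        ata_scsi_cmd_error_handler(shost, ap, &eh_work_q);

        /* ... LLDD-specific recovery of non-ATA commands could run here ... */

        /* Second half: reset and recover the port itself. */
        ata_scsi_port_error_handler(shost, ap);
}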
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 600f635..a834199 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2056,6 +2056,17 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
ATA_ID_SERNO_LEN);
num += ATA_ID_SERNO_LEN;
+ if (ata_id_has_wwn(args->id)) {
+ /* SAT defined lu world wide name */
+ /* piv=0, assoc=lu, code_set=binary, designator=NAA */
+ rbuf[num + 0] = 1;
+ rbuf[num + 1] = 3;
+ rbuf[num + 3] = ATA_ID_WWN_LEN;
+ num += 4;
+ ata_id_string(args->id, (unsigned char *) rbuf + num,
+ ATA_ID_WWN, ATA_ID_WWN_LEN);
+ num += ATA_ID_WWN_LEN;
+ }
rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */
return 0;
}
@@ -3759,7 +3770,7 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host,
return NULL;
ap->port_no = 0;
- ap->lock = shost->host_lock;
+ ap->lock = &host->lock;
ap->pio_mask = port_info->pio_mask;
ap->mwdma_mask = port_info->mwdma_mask;
ap->udma_mask = port_info->udma_mask;
@@ -3821,7 +3832,7 @@ int ata_sas_port_init(struct ata_port *ap)
if (!rc) {
ap->print_id = ata_print_id++;
- rc = ata_bus_probe(ap);
+ rc = ata_port_probe(ap);
}
return rc;
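
[Editor's note] The first libata-scsi.c hunk above appends a SAT NAA designator to the Device Identification VPD page (0x83) when IDENTIFY DEVICE reports a world wide name. A sketch of the bytes being written, assuming the usual SPC designation-descriptor layout (illustrative only; the driver writes the bytes directly into rbuf rather than through a structure):

/* Illustrative layout of the descriptor appended to VPD page 83h. */
struct vpd83_naa_descriptor {
        unsigned char codeset;          /* 0x01: protocol id 0, code set = binary */
        unsigned char assoc_type;       /* 0x03: PIV = 0, association = LU, type = NAA */
        unsigned char reserved;
        unsigned char length;           /* ATA_ID_WWN_LEN (8 bytes) */
        unsigned char wwn[8];           /* copied from IDENTIFY words 108-111 */
};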
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index af6141b..cf7acbc 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1302,6 +1302,18 @@ fsm_start:
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
+void ata_sff_queue_work(struct work_struct *work)
+{
+ queue_work(ata_sff_wq, work);
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_work);
+
+void ata_sff_queue_delayed_work(struct delayed_work *dwork, unsigned long delay)
+{
+ queue_delayed_work(ata_sff_wq, dwork, delay);
+}
+EXPORT_SYMBOL_GPL(ata_sff_queue_delayed_work);
+
void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
struct ata_port *ap = link->ap;
@@ -1311,8 +1323,7 @@ void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
ap->sff_pio_task_link = link;
/* may fail if ata_sff_flush_pio_task() in progress */
- queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
- msecs_to_jiffies(delay));
+ ata_sff_queue_delayed_work(&ap->sff_pio_task, msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
@@ -1336,7 +1347,7 @@ static void ata_sff_pio_task(struct work_struct *work)
u8 status;
int poll_next;
- BUG_ON(ap->sff_pio_task_link == NULL);
+ BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */
qc = ata_qc_from_tag(ap, link->active_tag);
if (!qc) {
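
[Editor's note] ata_sff_queue_work() and ata_sff_queue_delayed_work() above expose libata's private SFF workqueue so drivers can run their own transfer bottom halves on it, as the new pata_arasan_cf driver in this patch does for its DMA completion polling. A hedged sketch of that polling pattern; my_dev, my_poll_fn and the altstatus mapping are hypothetical, and the helper declarations are assumed to be exported through <linux/libata.h>:

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/libata.h>
#include <linux/workqueue.h>

struct my_dev {
        struct delayed_work dwork;
        void __iomem *altstatus;        /* hypothetical register mapping */
};

void my_poll_fn(struct work_struct *work)
{
        struct my_dev *d = container_of(work, struct my_dev, dwork.work);

        if (ioread8(d->altstatus) & (ATA_BUSY | ATA_DRQ)) {
                /* device still busy: poll again in 1 ms, on libata's SFF workqueue */
                ata_sff_queue_delayed_work(&d->dwork, msecs_to_jiffies(1));
                return;
        }
        /* ... the transfer is finished; complete the command here ... */
}

The work item would be set up once with INIT_DELAYED_WORK(&d->dwork, my_poll_fn) and kicked off with ata_sff_queue_delayed_work(&d->dwork, 0), mirroring delayed_finish() in the Arasan driver below.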
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index a9be110..773de97 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -103,6 +103,7 @@ extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern struct ata_port *ata_port_alloc(struct ata_host *host);
extern const char *sata_spd_string(unsigned int spd);
+extern int ata_port_probe(struct ata_port *ap);
/* libata-acpi.c */
#ifdef CONFIG_ATA_ACPI
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index c8d4703..91949d9 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -245,7 +245,7 @@ static struct ata_port_operations pacpi_ops = {
static int pacpi_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info info = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
new file mode 100644
index 0000000..65cee74
--- /dev/null
+++ b/drivers/ata/pata_arasan_cf.c
@@ -0,0 +1,983 @@
+/*
+ * drivers/ata/pata_arasan_cf.c
+ *
+ * Arasan Compact Flash host controller source file
+ *
+ * Copyright (C) 2011 ST Microelectronics
+ * Viresh Kumar <viresh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/*
+ * The Arasan CompactFlash Device Controller IP core has three basic modes of
+ * operation: PC card ATA using I/O mode, PC card ATA using memory mode, PC card
+ * ATA using true IDE modes. This driver supports only True IDE mode currently.
+ *
+ * Arasan CF Controller shares global irq register with Arasan XD Controller.
+ *
+ * Tested on arch/arm/mach-spear13xx
+ */
+
+#include <linux/ata.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/module.h>
+#include <linux/pata_arasan_cf_data.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#define DRIVER_NAME "arasan_cf"
+#define TIMEOUT msecs_to_jiffies(3000)
+
+/* Registers */
+/* CompactFlash Interface Status */
+#define CFI_STS 0x000
+ #define STS_CHG (1)
+ #define BIN_AUDIO_OUT (1 << 1)
+ #define CARD_DETECT1 (1 << 2)
+ #define CARD_DETECT2 (1 << 3)
+ #define INP_ACK (1 << 4)
+ #define CARD_READY (1 << 5)
+ #define IO_READY (1 << 6)
+ #define B16_IO_PORT_SEL (1 << 7)
+/* IRQ */
+#define IRQ_STS 0x004
+/* Interrupt Enable */
+#define IRQ_EN 0x008
+ #define CARD_DETECT_IRQ (1)
+ #define STATUS_CHNG_IRQ (1 << 1)
+ #define MEM_MODE_IRQ (1 << 2)
+ #define IO_MODE_IRQ (1 << 3)
+ #define TRUE_IDE_MODE_IRQ (1 << 8)
+ #define PIO_XFER_ERR_IRQ (1 << 9)
+ #define BUF_AVAIL_IRQ (1 << 10)
+ #define XFER_DONE_IRQ (1 << 11)
+ #define IGNORED_IRQS (STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
+ TRUE_IDE_MODE_IRQ)
+ #define TRUE_IDE_IRQS (CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
+ BUF_AVAIL_IRQ | XFER_DONE_IRQ)
+/* Operation Mode */
+#define OP_MODE 0x00C
+ #define CARD_MODE_MASK (0x3)
+ #define MEM_MODE (0x0)
+ #define IO_MODE (0x1)
+ #define TRUE_IDE_MODE (0x2)
+
+ #define CARD_TYPE_MASK (1 << 2)
+ #define CF_CARD (0)
+ #define CF_PLUS_CARD (1 << 2)
+
+ #define CARD_RESET (1 << 3)
+ #define CFHOST_ENB (1 << 4)
+ #define OUTPUTS_TRISTATE (1 << 5)
+ #define ULTRA_DMA_ENB (1 << 8)
+ #define MULTI_WORD_DMA_ENB (1 << 9)
+ #define DRQ_BLOCK_SIZE_MASK (0x3 << 11)
+ #define DRQ_BLOCK_SIZE_512 (0)
+ #define DRQ_BLOCK_SIZE_1024 (1 << 11)
+ #define DRQ_BLOCK_SIZE_2048 (2 << 11)
+ #define DRQ_BLOCK_SIZE_4096 (3 << 11)
+/* CF Interface Clock Configuration */
+#define CLK_CFG 0x010
+ #define CF_IF_CLK_MASK (0XF)
+/* CF Timing Mode Configuration */
+#define TM_CFG 0x014
+ #define MEM_MODE_TIMING_MASK (0x3)
+ #define MEM_MODE_TIMING_250NS (0x0)
+ #define MEM_MODE_TIMING_120NS (0x1)
+ #define MEM_MODE_TIMING_100NS (0x2)
+ #define MEM_MODE_TIMING_80NS (0x3)
+
+ #define IO_MODE_TIMING_MASK (0x3 << 2)
+ #define IO_MODE_TIMING_250NS (0x0 << 2)
+ #define IO_MODE_TIMING_120NS (0x1 << 2)
+ #define IO_MODE_TIMING_100NS (0x2 << 2)
+ #define IO_MODE_TIMING_80NS (0x3 << 2)
+
+ #define TRUEIDE_PIO_TIMING_MASK (0x7 << 4)
+ #define TRUEIDE_PIO_TIMING_SHIFT 4
+
+ #define TRUEIDE_MWORD_DMA_TIMING_MASK (0x7 << 7)
+ #define TRUEIDE_MWORD_DMA_TIMING_SHIFT 7
+
+ #define ULTRA_DMA_TIMING_MASK (0x7 << 10)
+ #define ULTRA_DMA_TIMING_SHIFT 10
+/* CF Transfer Address */
+#define XFER_ADDR 0x014
+ #define XFER_ADDR_MASK (0x7FF)
+ #define MAX_XFER_COUNT 0x20000u
+/* Transfer Control */
+#define XFER_CTR 0x01C
+ #define XFER_COUNT_MASK (0x3FFFF)
+ #define ADDR_INC_DISABLE (1 << 24)
+ #define XFER_WIDTH_MASK (1 << 25)
+ #define XFER_WIDTH_8B (0)
+ #define XFER_WIDTH_16B (1 << 25)
+
+ #define MEM_TYPE_MASK (1 << 26)
+ #define MEM_TYPE_COMMON (0)
+ #define MEM_TYPE_ATTRIBUTE (1 << 26)
+
+ #define MEM_IO_XFER_MASK (1 << 27)
+ #define MEM_XFER (0)
+ #define IO_XFER (1 << 27)
+
+ #define DMA_XFER_MODE (1 << 28)
+
+ #define AHB_BUS_NORMAL_PIO_OPRTN (~(1 << 29))
+ #define XFER_DIR_MASK (1 << 30)
+ #define XFER_READ (0)
+ #define XFER_WRITE (1 << 30)
+
+ #define XFER_START (1 << 31)
+/* Write Data Port */
+#define WRITE_PORT 0x024
+/* Read Data Port */
+#define READ_PORT 0x028
+/* ATA Data Port */
+#define ATA_DATA_PORT 0x030
+ #define ATA_DATA_PORT_MASK (0xFFFF)
+/* ATA Error/Features */
+#define ATA_ERR_FTR 0x034
+/* ATA Sector Count */
+#define ATA_SC 0x038
+/* ATA Sector Number */
+#define ATA_SN 0x03C
+/* ATA Cylinder Low */
+#define ATA_CL 0x040
+/* ATA Cylinder High */
+#define ATA_CH 0x044
+/* ATA Select Card/Head */
+#define ATA_SH 0x048
+/* ATA Status-Command */
+#define ATA_STS_CMD 0x04C
+/* ATA Alternate Status/Device Control */
+#define ATA_ASTS_DCTR 0x050
+/* Extended Write Data Port 0x200-0x3FC */
+#define EXT_WRITE_PORT 0x200
+/* Extended Read Data Port 0x400-0x5FC */
+#define EXT_READ_PORT 0x400
+ #define FIFO_SIZE 0x200u
+/* Global Interrupt Status */
+#define GIRQ_STS 0x800
+/* Global Interrupt Status enable */
+#define GIRQ_STS_EN 0x804
+/* Global Interrupt Signal enable */
+#define GIRQ_SGN_EN 0x808
+ #define GIRQ_CF (1)
+ #define GIRQ_XD (1 << 1)
+
+/* Compact Flash Controller Dev Structure */
+struct arasan_cf_dev {
+ /* pointer to ata_host structure */
+ struct ata_host *host;
+ /* clk structure, only if HAVE_CLK is defined */
+#ifdef CONFIG_HAVE_CLK
+ struct clk *clk;
+#endif
+
+ /* physical base address of controller */
+ dma_addr_t pbase;
+ /* virtual base address of controller */
+ void __iomem *vbase;
+ /* irq number*/
+ int irq;
+
+ /* status to be updated to framework regarding DMA transfer */
+ u8 dma_status;
+ /* Card is present or Not */
+ u8 card_present;
+
+ /* dma specific */
+ /* Completion for transfer complete interrupt from controller */
+ struct completion cf_completion;
+ /* Completion for DMA transfer complete. */
+ struct completion dma_completion;
+ /* Dma channel allocated */
+ struct dma_chan *dma_chan;
+ /* Mask for DMA transfers */
+ dma_cap_mask_t mask;
+ /* dma channel private data */
+ void *dma_priv;
+ /* DMA transfer work */
+ struct work_struct work;
+ /* DMA delayed finish work */
+ struct delayed_work dwork;
+ /* qc to be transferred using DMA */
+ struct ata_queued_cmd *qc;
+};
+
+static struct scsi_host_template arasan_cf_sht = {
+ ATA_BASE_SHT(DRIVER_NAME),
+ .sg_tablesize = SG_NONE,
+ .dma_boundary = 0xFFFFFFFFUL,
+};
+
+static void cf_dumpregs(struct arasan_cf_dev *acdev)
+{
+ struct device *dev = acdev->host->dev;
+
+ dev_dbg(dev, ": =========== REGISTER DUMP ===========");
+ dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
+ dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
+ dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
+ dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
+ dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
+ dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
+ dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
+ dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
+ dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
+ dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
+ dev_dbg(dev, ": =====================================");
+}
+
+/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
+static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
+{
+ /* enable should be 0 or 1 */
+ writel(enable, acdev->vbase + GIRQ_STS_EN);
+ writel(enable, acdev->vbase + GIRQ_SGN_EN);
+}
+
+/* Enable/Disable CF interrupts */
+static inline void
+cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
+{
+ u32 val = readl(acdev->vbase + IRQ_EN);
+ /* clear & enable/disable irqs */
+ if (enable) {
+ writel(mask, acdev->vbase + IRQ_STS);
+ writel(val | mask, acdev->vbase + IRQ_EN);
+ } else
+ writel(val & ~mask, acdev->vbase + IRQ_EN);
+}
+
+static inline void cf_card_reset(struct arasan_cf_dev *acdev)
+{
+ u32 val = readl(acdev->vbase + OP_MODE);
+
+ writel(val | CARD_RESET, acdev->vbase + OP_MODE);
+ udelay(200);
+ writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
+}
+
+static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
+{
+ writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
+ acdev->vbase + OP_MODE);
+ writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
+ acdev->vbase + OP_MODE);
+}
+
+static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
+{
+ struct ata_port *ap = acdev->host->ports[0];
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ u32 val = readl(acdev->vbase + CFI_STS);
+
+ /* Both CD1 & CD2 should be low if card inserted completely */
+ if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
+ if (acdev->card_present)
+ return;
+ acdev->card_present = 1;
+ cf_card_reset(acdev);
+ } else {
+ if (!acdev->card_present)
+ return;
+ acdev->card_present = 0;
+ }
+
+ if (hotplugged) {
+ ata_ehi_hotplugged(ehi);
+ ata_port_freeze(ap);
+ }
+}
+
+static int cf_init(struct arasan_cf_dev *acdev)
+{
+ struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
+ unsigned long flags;
+ int ret = 0;
+
+#ifdef CONFIG_HAVE_CLK
+ ret = clk_enable(acdev->clk);
+ if (ret) {
+ dev_dbg(acdev->host->dev, "clock enable failed");
+ return ret;
+ }
+#endif
+
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ /* configure CF interface clock */
+ writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
+ CF_IF_CLK_166M, acdev->vbase + CLK_CFG);
+
+ writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
+ cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
+ cf_ginterrupt_enable(acdev, 1);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+ return ret;
+}
+
+static void cf_exit(struct arasan_cf_dev *acdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ cf_ginterrupt_enable(acdev, 0);
+ cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
+ cf_card_reset(acdev);
+ writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
+ acdev->vbase + OP_MODE);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+#ifdef CONFIG_HAVE_CLK
+ clk_disable(acdev->clk);
+#endif
+}
+
+static void dma_callback(void *dev)
+{
+ struct arasan_cf_dev *acdev = (struct arasan_cf_dev *) dev;
+
+ complete(&acdev->dma_completion);
+}
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+ chan->private = slave;
+ return true;
+}
+
+static inline void dma_complete(struct arasan_cf_dev *acdev)
+{
+ struct ata_queued_cmd *qc = acdev->qc;
+ unsigned long flags;
+
+ acdev->qc = NULL;
+ ata_sff_interrupt(acdev->irq, acdev->host);
+
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
+ ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+}
+
+static inline int wait4buf(struct arasan_cf_dev *acdev)
+{
+ if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
+ u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+
+ dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
+ return -ETIMEDOUT;
+ }
+
+ /* Check if PIO Error interrupt has occured */
+ if (acdev->dma_status & ATA_DMA_ERR)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int
+dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
+{
+ struct dma_async_tx_descriptor *tx;
+ struct dma_chan *chan = acdev->dma_chan;
+ dma_cookie_t cookie;
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
+ DMA_COMPL_SKIP_DEST_UNMAP;
+ int ret = 0;
+
+ tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+ if (!tx) {
+ dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
+ return -EAGAIN;
+ }
+
+ tx->callback = dma_callback;
+ tx->callback_param = acdev;
+ cookie = tx->tx_submit(tx);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(acdev->host->dev, "dma_submit_error\n");
+ return ret;
+ }
+
+ chan->device->device_issue_pending(chan);
+
+ /* Wait for DMA to complete */
+ if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
+{
+ dma_addr_t dest = 0, src = 0;
+ u32 xfer_cnt, sglen, dma_len, xfer_ctr;
+ u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+ unsigned long flags;
+ int ret = 0;
+
+ sglen = sg_dma_len(sg);
+ if (write) {
+ src = sg_dma_address(sg);
+ dest = acdev->pbase + EXT_WRITE_PORT;
+ } else {
+ dest = sg_dma_address(sg);
+ src = acdev->pbase + EXT_READ_PORT;
+ }
+
+ /*
+ * For each sg:
+ * MAX_XFER_COUNT data will be transferred before we get transfer
+ * complete interrupt. Inbetween after FIFO_SIZE data
+ * buffer available interrupt will be generated. At this time we will
+ * fill FIFO again: max FIFO_SIZE data.
+ */
+ while (sglen) {
+ xfer_cnt = min(sglen, MAX_XFER_COUNT);
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ xfer_ctr = readl(acdev->vbase + XFER_CTR) &
+ ~XFER_COUNT_MASK;
+ writel(xfer_ctr | xfer_cnt | XFER_START,
+ acdev->vbase + XFER_CTR);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+ /* continue dma xfers untill current sg is completed */
+ while (xfer_cnt) {
+ /* wait for read to complete */
+ if (!write) {
+ ret = wait4buf(acdev);
+ if (ret)
+ goto fail;
+ }
+
+ /* read/write FIFO in chunk of FIFO_SIZE */
+ dma_len = min(xfer_cnt, FIFO_SIZE);
+ ret = dma_xfer(acdev, src, dest, dma_len);
+ if (ret) {
+ dev_err(acdev->host->dev, "dma failed");
+ goto fail;
+ }
+
+ if (write)
+ src += dma_len;
+ else
+ dest += dma_len;
+
+ sglen -= dma_len;
+ xfer_cnt -= dma_len;
+
+ /* wait for write to complete */
+ if (write) {
+ ret = wait4buf(acdev);
+ if (ret)
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
+ acdev->vbase + XFER_CTR);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+ return ret;
+}
+
+/*
+ * This routine uses External DMA controller to read/write data to FIFO of CF
+ * controller. There are two xfer related interrupt supported by CF controller:
+ * - buf_avail: This interrupt is generated as soon as we have buffer of 512
+ * bytes available for reading or empty buffer available for writing.
+ * - xfer_done: This interrupt is generated on transfer of "xfer_size" amount of
+ * data to/from FIFO. xfer_size is programmed in XFER_CTR register.
+ *
+ * Max buffer size = FIFO_SIZE = 512 Bytes.
+ * Max xfer_size = MAX_XFER_COUNT = 256 KB.
+ */
+static void data_xfer(struct work_struct *work)
+{
+ struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
+ work);
+ struct ata_queued_cmd *qc = acdev->qc;
+ struct scatterlist *sg;
+ unsigned long flags;
+ u32 temp;
+ int ret = 0;
+
+ /* request dma channels */
+ /* dma_request_channel may sleep, so calling from process context */
+ acdev->dma_chan = dma_request_channel(acdev->mask, filter,
+ acdev->dma_priv);
+ if (!acdev->dma_chan) {
+ dev_err(acdev->host->dev, "Unable to get dma_chan\n");
+ goto chan_request_fail;
+ }
+
+ for_each_sg(qc->sg, sg, qc->n_elem, temp) {
+ ret = sg_xfer(acdev, sg);
+ if (ret)
+ break;
+ }
+
+ dma_release_channel(acdev->dma_chan);
+
+ /* data xferred successfully */
+ if (!ret) {
+ u32 status;
+
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ status = ioread8(qc->ap->ioaddr.altstatus_addr);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+ if (status & (ATA_BUSY | ATA_DRQ)) {
+ ata_sff_queue_delayed_work(&acdev->dwork, 1);
+ return;
+ }
+
+ goto sff_intr;
+ }
+
+ cf_dumpregs(acdev);
+
+chan_request_fail:
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ /* error when transferring data to/from memory */
+ qc->err_mask |= AC_ERR_HOST_BUS;
+ qc->ap->hsm_task_state = HSM_ST_ERR;
+
+ cf_ctrl_reset(acdev);
+ spin_unlock_irqrestore(qc->ap->lock, flags);
+sff_intr:
+ dma_complete(acdev);
+}
+
+static void delayed_finish(struct work_struct *work)
+{
+ struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
+ dwork.work);
+ struct ata_queued_cmd *qc = acdev->qc;
+ unsigned long flags;
+ u8 status;
+
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ status = ioread8(qc->ap->ioaddr.altstatus_addr);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+ if (status & (ATA_BUSY | ATA_DRQ))
+ ata_sff_queue_delayed_work(&acdev->dwork, 1);
+ else
+ dma_complete(acdev);
+}
+
+static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
+{
+ struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
+ unsigned long flags;
+ u32 irqsts;
+
+ irqsts = readl(acdev->vbase + GIRQ_STS);
+ if (!(irqsts & GIRQ_CF))
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ irqsts = readl(acdev->vbase + IRQ_STS);
+ writel(irqsts, acdev->vbase + IRQ_STS); /* clear irqs */
+ writel(GIRQ_CF, acdev->vbase + GIRQ_STS); /* clear girqs */
+
+ /* handle only relevant interrupts */
+ irqsts &= ~IGNORED_IRQS;
+
+ if (irqsts & CARD_DETECT_IRQ) {
+ cf_card_detect(acdev, 1);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (irqsts & PIO_XFER_ERR_IRQ) {
+ acdev->dma_status = ATA_DMA_ERR;
+ writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
+ acdev->vbase + XFER_CTR);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+ complete(&acdev->cf_completion);
+ dev_err(acdev->host->dev, "pio xfer err irq\n");
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+
+ if (irqsts & BUF_AVAIL_IRQ) {
+ complete(&acdev->cf_completion);
+ return IRQ_HANDLED;
+ }
+
+ if (irqsts & XFER_DONE_IRQ) {
+ struct ata_queued_cmd *qc = acdev->qc;
+
+ /* Send Complete only for write */
+ if (qc->tf.flags & ATA_TFLAG_WRITE)
+ complete(&acdev->cf_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void arasan_cf_freeze(struct ata_port *ap)
+{
+ struct arasan_cf_dev *acdev = ap->host->private_data;
+
+ /* stop transfer and reset controller */
+ writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
+ acdev->vbase + XFER_CTR);
+ cf_ctrl_reset(acdev);
+ acdev->dma_status = ATA_DMA_ERR;
+
+ ata_sff_dma_pause(ap);
+ ata_sff_freeze(ap);
+}
+
+void arasan_cf_error_handler(struct ata_port *ap)
+{
+ struct arasan_cf_dev *acdev = ap->host->private_data;
+
+ /*
+ * DMA transfers using an external DMA controller may be scheduled.
+ * Abort them before handling the error. Refer to data_xfer() for further
+ * details.
+ */
+ cancel_work_sync(&acdev->work);
+ cancel_delayed_work_sync(&acdev->dwork);
+ return ata_sff_error_handler(ap);
+}
+
+static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
+{
+ u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
+ u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
+
+ xfer_ctr |= write ? XFER_WRITE : XFER_READ;
+ writel(xfer_ctr, acdev->vbase + XFER_CTR);
+
+ acdev->qc->ap->ops->sff_exec_command(acdev->qc->ap, &acdev->qc->tf);
+ ata_sff_queue_work(&acdev->work);
+}
+
+unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct arasan_cf_dev *acdev = ap->host->private_data;
+
+ /* defer PIO handling to sff_qc_issue */
+ if (!ata_is_dma(qc->tf.protocol))
+ return ata_sff_qc_issue(qc);
+
+ /* select the device */
+ ata_wait_idle(ap);
+ ata_sff_dev_select(ap, qc->dev->devno);
+ ata_wait_idle(ap);
+
+ /* start the command */
+ switch (qc->tf.protocol) {
+ case ATA_PROT_DMA:
+ WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+
+ ap->ops->sff_tf_load(ap, &qc->tf);
+ acdev->dma_status = 0;
+ acdev->qc = qc;
+ arasan_cf_dma_start(acdev);
+ ap->hsm_task_state = HSM_ST_LAST;
+ break;
+
+ default:
+ WARN_ON(1);
+ return AC_ERR_SYSTEM;
+ }
+
+ return 0;
+}
+
+static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct arasan_cf_dev *acdev = ap->host->private_data;
+ u8 pio = adev->pio_mode - XFER_PIO_0;
+ unsigned long flags;
+ u32 val;
+
+ /* Arasan ctrl supports Mode0 -> Mode6 */
+ if (pio > 6) {
+ dev_err(ap->dev, "Unknown PIO mode\n");
+ return;
+ }
+
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ val = readl(acdev->vbase + OP_MODE) &
+ ~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
+ writel(val, acdev->vbase + OP_MODE);
+ val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
+ val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
+ writel(val, acdev->vbase + TM_CFG);
+
+ cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
+ cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+}
+
+static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct arasan_cf_dev *acdev = ap->host->private_data;
+ u32 opmode, tmcfg, dma_mode = adev->dma_mode;
+ unsigned long flags;
+
+ spin_lock_irqsave(&acdev->host->lock, flags);
+ opmode = readl(acdev->vbase + OP_MODE) &
+ ~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
+ tmcfg = readl(acdev->vbase + TM_CFG);
+
+ if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
+ opmode |= ULTRA_DMA_ENB;
+ tmcfg &= ~ULTRA_DMA_TIMING_MASK;
+ tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
+ } else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
+ opmode |= MULTI_WORD_DMA_ENB;
+ tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
+ tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
+ TRUEIDE_MWORD_DMA_TIMING_SHIFT;
+ } else {
+ dev_err(ap->dev, "Unknown DMA mode\n");
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+ return;
+ }
+
+ writel(opmode, acdev->vbase + OP_MODE);
+ writel(tmcfg, acdev->vbase + TM_CFG);
+ writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
+
+ cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
+ cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
+ spin_unlock_irqrestore(&acdev->host->lock, flags);
+}
+
+static struct ata_port_operations arasan_cf_ops = {
+ .inherits = &ata_sff_port_ops,
+ .freeze = arasan_cf_freeze,
+ .error_handler = arasan_cf_error_handler,
+ .qc_issue = arasan_cf_qc_issue,
+ .set_piomode = arasan_cf_set_piomode,
+ .set_dmamode = arasan_cf_set_dmamode,
+};
+
+static int __devinit arasan_cf_probe(struct platform_device *pdev)
+{
+ struct arasan_cf_dev *acdev;
+ struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct ata_host *host;
+ struct ata_port *ap;
+ struct resource *res;
+ irq_handler_t irq_handler = NULL;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ DRIVER_NAME)) {
+ dev_warn(&pdev->dev, "Failed to get memory region resource\n");
+ return -ENOENT;
+ }
+
+ acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
+ if (!acdev) {
+ dev_warn(&pdev->dev, "kzalloc fail\n");
+ return -ENOMEM;
+ }
+
+ /* if irq is 0, support only PIO */
+ acdev->irq = platform_get_irq(pdev, 0);
+ if (acdev->irq)
+ irq_handler = arasan_cf_interrupt;
+ else
+ pdata->quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
+
+ acdev->pbase = res->start;
+ acdev->vbase = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!acdev->vbase) {
+ dev_warn(&pdev->dev, "ioremap fail\n");
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_HAVE_CLK
+ acdev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(acdev->clk)) {
+ dev_warn(&pdev->dev, "Clock not found\n");
+ return PTR_ERR(acdev->clk);
+ }
+#endif
+
+ /* allocate host */
+ host = ata_host_alloc(&pdev->dev, 1);
+ if (!host) {
+ ret = -ENOMEM;
+ dev_warn(&pdev->dev, "alloc host fail\n");
+ goto free_clk;
+ }
+
+ ap = host->ports[0];
+ host->private_data = acdev;
+ acdev->host = host;
+ ap->ops = &arasan_cf_ops;
+ ap->pio_mask = ATA_PIO6;
+ ap->mwdma_mask = ATA_MWDMA4;
+ ap->udma_mask = ATA_UDMA6;
+
+ init_completion(&acdev->cf_completion);
+ init_completion(&acdev->dma_completion);
+ INIT_WORK(&acdev->work, data_xfer);
+ INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
+ dma_cap_set(DMA_MEMCPY, acdev->mask);
+ acdev->dma_priv = pdata->dma_priv;
+
+ /* Handle platform specific quirks */
+ if (pdata->quirk) {
+ if (pdata->quirk & CF_BROKEN_PIO) {
+ ap->ops->set_piomode = NULL;
+ ap->pio_mask = 0;
+ }
+ if (pdata->quirk & CF_BROKEN_MWDMA)
+ ap->mwdma_mask = 0;
+ if (pdata->quirk & CF_BROKEN_UDMA)
+ ap->udma_mask = 0;
+ }
+ ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;
+
+ ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
+ ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
+ ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
+ ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
+ ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
+ ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
+ ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
+ ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
+ ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
+ ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
+ ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
+ ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
+ ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;
+
+ ata_port_desc(ap, "phy_addr %llx virt_addr %p",
+ (unsigned long long) res->start, acdev->vbase);
+
+ ret = cf_init(acdev);
+ if (ret)
+ goto free_clk;
+
+ cf_card_detect(acdev, 0);
+
+ return ata_host_activate(host, acdev->irq, irq_handler, 0,
+ &arasan_cf_sht);
+
+free_clk:
+#ifdef CONFIG_HAVE_CLK
+ clk_put(acdev->clk);
+#endif
+ return ret;
+}
+
+static int __devexit arasan_cf_remove(struct platform_device *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct arasan_cf_dev *acdev = host->ports[0]->private_data;
+
+ ata_host_detach(host);
+ cf_exit(acdev);
+#ifdef CONFIG_HAVE_CLK
+ clk_put(acdev->clk);
+#endif
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int arasan_cf_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct arasan_cf_dev *acdev = host->ports[0]->private_data;
+
+ if (acdev->dma_chan) {
+ acdev->dma_chan->device->device_control(acdev->dma_chan,
+ DMA_TERMINATE_ALL, 0);
+ dma_release_channel(acdev->dma_chan);
+ }
+ cf_exit(acdev);
+ return ata_host_suspend(host, PMSG_SUSPEND);
+}
+
+static int arasan_cf_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct arasan_cf_dev *acdev = host->ports[0]->private_data;
+
+ cf_init(acdev);
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static const struct dev_pm_ops arasan_cf_pm_ops = {
+ .suspend = arasan_cf_suspend,
+ .resume = arasan_cf_resume,
+};
+#endif
+
+static struct platform_driver arasan_cf_driver = {
+ .probe = arasan_cf_probe,
+ .remove = __devexit_p(arasan_cf_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &arasan_cf_pm_ops,
+#endif
+ },
+};
+
+static int __init arasan_cf_init(void)
+{
+ return platform_driver_register(&arasan_cf_driver);
+}
+module_init(arasan_cf_init);
+
+static void __exit arasan_cf_exit(void)
+{
+ platform_driver_unregister(&arasan_cf_driver);
+}
+module_exit(arasan_cf_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 66ce6a5..36f189c 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -194,7 +194,7 @@ static int __init pata_at32_init_one(struct device *dev,
/* Setup ATA bindings */
ap->ops = &at32_port_ops;
ap->pio_mask = PIO_MASK;
- ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_SLAVE_POSS;
+ ap->flags |= ATA_FLAG_SLAVE_POSS;
/*
* Since all 8-bit taskfile transfers has to go on the lower
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index 7aed5c7..e0b58b8 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1454,9 +1454,7 @@ static struct ata_port_operations bfin_pata_ops = {
static struct ata_port_info bfin_port_info[] = {
{
- .flags = ATA_FLAG_SLAVE_POSS
- | ATA_FLAG_MMIO
- | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = 0,
.udma_mask = 0,
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 538ec38..6c77d68 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -14,6 +14,7 @@
* Look into engine reset on timeout errors. Should not be required.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -25,7 +26,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt366"
-#define DRV_VERSION "0.6.10"
+#define DRV_VERSION "0.6.11"
struct hpt_clock {
u8 xfer_mode;
@@ -160,8 +161,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
while (list[i] != NULL) {
if (!strcmp(list[i], model_num)) {
- pr_warning(DRV_NAME ": %s is not supported for %s.\n",
- modestr, list[i]);
+ pr_warn("%s is not supported for %s\n",
+ modestr, list[i]);
return 1;
}
i++;
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 4c5b518..9620636 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -14,6 +14,8 @@
* Look into engine reset on timeout errors. Should not be required.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -24,7 +26,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.6.22"
+#define DRV_VERSION "0.6.23"
struct hpt_clock {
u8 xfer_speed;
@@ -229,8 +231,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
while (list[i] != NULL) {
if (!strcmp(list[i], model_num)) {
- pr_warning(DRV_NAME ": %s is not supported for %s.\n",
- modestr, list[i]);
+ pr_warn("%s is not supported for %s\n",
+ modestr, list[i]);
return 1;
}
i++;
@@ -863,8 +865,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
chip_table = &hpt372;
break;
default:
- pr_err(DRV_NAME ": Unknown HPT366 subtype, "
- "please report (%d).\n", rev);
+ pr_err("Unknown HPT366 subtype, please report (%d)\n",
+ rev);
return -ENODEV;
}
break;
@@ -904,8 +906,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
*ppi = &info_hpt374_fn1;
break;
default:
- pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
- dev->device);
+ pr_err("PCI table is bogus, please report (%d)\n", dev->device);
return -ENODEV;
}
/* Ok so this is a chip we support */
@@ -953,7 +954,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
u8 sr;
u32 total = 0;
- pr_warning(DRV_NAME ": BIOS has not set timing clocks.\n");
+ pr_warn("BIOS has not set timing clocks\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
@@ -1009,7 +1010,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
(f_high << 16) | f_low | 0x100);
}
if (adjust == 8) {
- pr_err(DRV_NAME ": DPLL did not stabilize!\n");
+ pr_err("DPLL did not stabilize!\n");
return -ENODEV;
}
if (dpll == 3)
@@ -1017,7 +1018,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
else
private_data = (void *)hpt37x_timings_50;
- pr_info(DRV_NAME ": bus clock %dMHz, using %dMHz DPLL.\n",
+ pr_info("bus clock %dMHz, using %dMHz DPLL\n",
MHz[clock_slot], MHz[dpll]);
} else {
private_data = (void *)chip_table->clocks[clock_slot];
@@ -1032,7 +1033,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (clock_slot < 2 && ppi[0] == &info_hpt370a)
ppi[0] = &info_hpt370a_33;
- pr_info(DRV_NAME ": %s using %dMHz bus clock.\n",
+ pr_info("%s using %dMHz bus clock\n",
chip_table->name, MHz[clock_slot]);
}
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index eca68ca..765f136 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -15,6 +15,8 @@
* Work out best PLL policy
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -25,7 +27,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.14"
+#define DRV_VERSION "0.3.15"
enum {
HPT_PCI_FAST = (1 << 31),
@@ -418,7 +420,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
u16 sr;
u32 total = 0;
- pr_warning(DRV_NAME ": BIOS clock data not set.\n");
+ pr_warn("BIOS clock data not set\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
@@ -528,8 +530,7 @@ hpt372n:
ppi[0] = &info_hpt372n;
break;
default:
- pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
- dev->device);
+ pr_err("PCI table is bogus, please report (%d)\n", dev->device);
return -ENODEV;
}
@@ -578,11 +579,11 @@ hpt372n:
pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
}
if (adjust == 8) {
- pr_err(DRV_NAME ": DPLL did not stabilize!\n");
+ pr_err("DPLL did not stabilize!\n");
return -ENODEV;
}
- pr_info(DRV_NAME ": bus clock %dMHz, using 66MHz DPLL.\n", pci_mhz);
+ pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz);
/*
* Set our private data up. We only need a few flags
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index b63d5e2..24d7df8 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -151,7 +151,7 @@ static struct ata_port_operations hpt3x3_port_ops = {
.check_atapi_dma= hpt3x3_atapi_dma,
.freeze = hpt3x3_freeze,
#endif
-
+
};
/**
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index aa0e0c5..2d15f25 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -616,7 +616,7 @@ static void it821x_display_disk(int n, u8 *buf)
if (buf[52] > 4) /* No Disk */
return;
- ata_id_c_string((u16 *)buf, id, 0, 41);
+ ata_id_c_string((u16 *)buf, id, 0, 41);
if (buf[51]) {
mode = ffs(buf[51]);
@@ -910,7 +910,7 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
rc = pcim_enable_device(pdev);
if (rc)
return rc;
-
+
if (pdev->vendor == PCI_VENDOR_ID_RDC) {
/* Deal with Vortex86SX */
if (pdev->revision == 0x11)
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index ba54b08..5253b27 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -177,7 +177,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
ap->ops = &ixp4xx_port_ops;
ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_NO_ATAPI;
+ ap->flags |= ATA_FLAG_NO_ATAPI;
ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 75b49d0..46f589e 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -1053,8 +1053,7 @@ static int __devinit pata_macio_common_init(struct pata_macio_priv *priv,
/* Allocate libata host for 1 port */
memset(&pinfo, 0, sizeof(struct ata_port_info));
pmac_macio_calc_timing_masks(priv, &pinfo);
- pinfo.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO |
- ATA_FLAG_NO_LEGACY;
+ pinfo.flags = ATA_FLAG_SLAVE_POSS;
pinfo.port_ops = &pata_macio_ops;
pinfo.private_data = priv;
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index dd38083..75a6a0c 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -38,7 +38,7 @@ static int marvell_pata_active(struct pci_dev *pdev)
/* We don't yet know how to do this for other devices */
if (pdev->device != 0x6145)
- return 1;
+ return 1;
barp = pci_iomap(pdev, 5, 0x10);
if (barp == NULL)
diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
index cc50bd0..e277a14 100644
--- a/drivers/ata/pata_ninja32.c
+++ b/drivers/ata/pata_ninja32.c
@@ -165,7 +165,7 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
return rc;
ninja32_program(host->iomap[0]);
ata_host_resume(host);
- return 0;
+ return 0;
}
#endif
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index fa1b95a..220ddc9 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -848,8 +848,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
cf_port->ap = ap;
ap->ops = &octeon_cf_ops;
ap->pio_mask = ATA_PIO6;
- ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY
- | ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
+ ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;
base = cs0 + ocd->base_region_bias;
if (!ocd->is16bit) {
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 11fb4cc..a2a73d9 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -85,7 +85,7 @@ static __devinit int palmld_pata_probe(struct platform_device *pdev)
ap = host->ports[0];
ap->ops = &palmld_port_ops;
ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY | ATA_FLAG_PIO_POLLING;
+ ap->flags |= ATA_FLAG_PIO_POLLING;
/* memory mapping voodoo */
ap->ioaddr.cmd_addr = mem + 0x10;
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 8062921..29af660 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -124,7 +124,7 @@ static unsigned int ata_data_xfer_8bit(struct ata_device *dev,
* reset will recover the device.
*
*/
-
+
static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
{
int count;
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index b183511..9765ace 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -150,8 +150,7 @@ static struct ata_port_operations pdc2027x_pata133_ops = {
static struct ata_port_info pdc2027x_port_info[] = {
/* PDC_UDMA_100 */
{
- .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
- ATA_FLAG_MMIO,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA5,
@@ -159,8 +158,7 @@ static struct ata_port_info pdc2027x_port_info[] = {
},
/* PDC_UDMA_133 */
{
- .flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
- ATA_FLAG_MMIO,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 1898c6e..b4ede40 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -292,7 +292,6 @@ static int __devinit pxa_ata_probe(struct platform_device *pdev)
ap->ops = &pxa_ata_port_ops;
ap->pio_mask = ATA_PIO4;
ap->mwdma_mask = ATA_MWDMA2;
- ap->flags = ATA_FLAG_MMIO;
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 0ffd631..baeaf938 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -91,7 +91,6 @@ static void rb532_pata_setup_ports(struct ata_host *ah)
ap->ops = &rb532_pata_port_ops;
ap->pio_mask = ATA_PIO4;
- ap->flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
ap->ioaddr.cmd_addr = info->iobase + RB500_CF_REG_BASE;
ap->ioaddr.ctl_addr = info->iobase + RB500_CF_REG_CTRL;
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 8a51d67..c446ae6 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -531,7 +531,6 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
}
ap = host->ports[0];
- ap->flags |= ATA_FLAG_MMIO;
ap->pio_mask = ATA_PIO4;
if (cpu_type == TYPE_S3C64XX) {
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 093715c..88ea9b6 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -959,7 +959,7 @@ static struct ata_port_operations scc_pata_ops = {
static struct ata_port_info scc_port_info[] = {
{
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 60cea13..c04abc3 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -593,7 +593,7 @@ static const struct ata_port_info sis_info133 = {
.port_ops = &sis_133_ops,
};
const struct ata_port_info sis_info133_for_sata = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index adbe042..1111712 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -166,9 +166,7 @@ static struct ata_port_operations adma_ata_ops = {
static struct ata_port_info adma_port_info[] = {
/* board_1841_idx */
{
- .flags = ATA_FLAG_SLAVE_POSS |
- ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
- ATA_FLAG_PIO_POLLING,
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4_ONLY,
.udma_mask = ATA_UDMA4,
.port_ops = &adma_ata_ops,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 6cf57c5..712ab5a 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -40,8 +40,11 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
+/* These two are defined in "libata.h" */
+#undef DRV_NAME
+#undef DRV_VERSION
#define DRV_NAME "sata-dwc"
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "1.3"
/* SATA DMA driver Globals */
#define DMA_NUM_CHANS 1
@@ -333,11 +336,47 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
void __iomem *addr, int dir);
static void dma_dwc_xfer_start(int dma_ch);
+static const char *get_prot_descript(u8 protocol)
+{
+ switch ((enum ata_tf_protocols)protocol) {
+ case ATA_PROT_NODATA:
+ return "ATA no data";
+ case ATA_PROT_PIO:
+ return "ATA PIO";
+ case ATA_PROT_DMA:
+ return "ATA DMA";
+ case ATA_PROT_NCQ:
+ return "ATA NCQ";
+ case ATAPI_PROT_NODATA:
+ return "ATAPI no data";
+ case ATAPI_PROT_PIO:
+ return "ATAPI PIO";
+ case ATAPI_PROT_DMA:
+ return "ATAPI DMA";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *get_dma_dir_descript(int dma_dir)
+{
+ switch ((enum dma_data_direction)dma_dir) {
+ case DMA_BIDIRECTIONAL:
+ return "bidirectional";
+ case DMA_TO_DEVICE:
+ return "to device";
+ case DMA_FROM_DEVICE:
+ return "from device";
+ default:
+ return "none";
+ }
+}
+
static void sata_dwc_tf_dump(struct ata_taskfile *tf)
{
dev_vdbg(host_pvt.dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags:"
- "0x%lx device: %x\n", tf->command, ata_get_cmd_descript\
- (tf->protocol), tf->flags, tf->device);
+ "0x%lx device: %x\n", tf->command,
+ get_prot_descript(tf->protocol), tf->flags, tf->device);
dev_vdbg(host_pvt.dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x "
"lbam: 0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
tf->lbam, tf->lbah);
@@ -715,7 +754,7 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
/* Program the CTL register with src enable / dst enable */
out_le32(&(host_pvt.sata_dma_regs->chan_regs[dma_ch].ctl.low),
DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
- return 0;
+ return dma_ch;
}
/*
@@ -967,7 +1006,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
}
dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
- __func__, ata_get_cmd_descript(qc->tf.protocol));
+ __func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
if (ata_is_dma(qc->tf.protocol)) {
/*
@@ -1057,7 +1096,7 @@ DRVSTILLBUSY:
/* Process completed command */
dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
- ata_get_cmd_descript(qc->tf.protocol));
+ get_prot_descript(qc->tf.protocol));
if (ata_is_dma(qc->tf.protocol)) {
host_pvt.dma_interrupt_count++;
if (hsdevp->dma_pending[tag] == \
@@ -1142,8 +1181,8 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
if (tag > 0) {
dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
"dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
- ata_get_cmd_descript(qc->dma_dir),
- ata_get_cmd_descript(qc->tf.protocol),
+ get_dma_dir_descript(qc->dma_dir),
+ get_prot_descript(qc->tf.protocol),
in_le32(&(hsdev->sata_dwc_regs->dmacr)));
}
#endif
@@ -1354,7 +1393,7 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
- ata_get_cmd_descript(tf), tag);
+ ata_get_cmd_descript(tf->command), tag);
spin_lock_irqsave(&ap->host->lock, flags);
hsdevp->cmd_issued[tag] = cmd_issued;
@@ -1413,7 +1452,7 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
"start_dma? %x\n", __func__, qc, tag, qc->tf.command,
- ata_get_cmd_descript(qc->dma_dir), start_dma);
+ get_dma_dir_descript(qc->dma_dir), start_dma);
sata_dwc_tf_dump(&(qc->tf));
if (start_dma) {
@@ -1462,10 +1501,9 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
int dma_chan;
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
- int err;
dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
- __func__, ap->port_no, ata_get_cmd_descript(qc->dma_dir),
+ __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
qc->n_elem);
dma_chan = dma_dwc_xfer_setup(sg, qc->n_elem, hsdevp->llit[tag],
@@ -1474,7 +1512,7 @@ static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
dmadr), qc->dma_dir);
if (dma_chan < 0) {
dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
- __func__, err);
+ __func__, dma_chan);
return;
}
hsdevp->dma_chan[tag] = dma_chan;
@@ -1491,8 +1529,8 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d "
"prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
__func__, ap->print_id, qc->tf.command,
- ata_get_cmd_descript(&qc->tf),
- qc->tag, ata_get_cmd_descript(qc->tf.protocol),
+ ata_get_cmd_descript(qc->tf.command),
+ qc->tag, get_prot_descript(qc->tf.protocol),
ap->link.active_tag, ap->link.sactive);
#endif
@@ -1533,7 +1571,7 @@ static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
#ifdef DEBUG_NCQ
if (qc->tag > 0)
dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
- __func__, tag, qc->ap->link.active_tag);
+ __func__, qc->tag, qc->ap->link.active_tag);
return ;
#endif
@@ -1580,9 +1618,8 @@ static struct ata_port_operations sata_dwc_ops = {
static const struct ata_port_info sata_dwc_port_info[] = {
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_NCQ,
- .pio_mask = 0x1f, /* pio 0-4 */
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
+ .pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &sata_dwc_ops,
},
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b0214d0..7f9eab3 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -33,8 +33,7 @@ enum {
SATA_FSL_MAX_PRD_USABLE = SATA_FSL_MAX_PRD - 1,
SATA_FSL_MAX_PRD_DIRECT = 16, /* Direct PRDT entries */
- SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH,
@@ -186,6 +185,11 @@ enum {
COMMANDSTAT = 0x20,
};
+/* TRANSCFG (transport-layer) configuration control */
+enum {
+ TRANSCFG_RX_WATER_MARK = (1 << 4),
+};
+
/* PHY (link-layer) configuration control */
enum {
PHY_BIST_ENABLE = 0x01,
@@ -1040,12 +1044,15 @@ static void sata_fsl_error_intr(struct ata_port *ap)
/* find out the offending link and qc */
if (ap->nr_pmp_links) {
+ unsigned int dev_num;
+
dereg = ioread32(hcr_base + DE);
iowrite32(dereg, hcr_base + DE);
iowrite32(cereg, hcr_base + CE);
- if (dereg < ap->nr_pmp_links) {
- link = &ap->pmp_link[dereg];
+ dev_num = ffs(dereg) - 1;
+ if (dev_num < ap->nr_pmp_links && dereg != 0) {
+ link = &ap->pmp_link[dev_num];
ehi = &link->eh_info;
qc = ata_qc_from_tag(ap, link->active_tag);
/*
@@ -1303,6 +1310,7 @@ static int sata_fsl_probe(struct platform_device *ofdev,
struct sata_fsl_host_priv *host_priv = NULL;
int irq;
struct ata_host *host;
+ u32 temp;
struct ata_port_info pi = sata_fsl_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1317,6 +1325,12 @@ static int sata_fsl_probe(struct platform_device *ofdev,
ssr_base = hcr_base + 0x100;
csr_base = hcr_base + 0x140;
+ if (!of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc8315-sata")) {
+ temp = ioread32(csr_base + TRANSCFG);
+ temp = temp & 0xffffffe0;
+ iowrite32(temp | TRANSCFG_RX_WATER_MARK, csr_base + TRANSCFG);
+ }
+
DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG));
DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc));
DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bf74a36..cd40651 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -160,8 +160,7 @@ enum {
/* Host Flags */
MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
- MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
+ MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 7254e25..42344e3 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -539,7 +539,7 @@ struct nv_pi_priv {
static const struct ata_port_info nv_port_info[] = {
/* generic */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -548,7 +548,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* nforce2/3 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -557,7 +557,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* ck804 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -566,8 +566,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* ADMA */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -576,7 +575,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* MCP5x */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@@ -585,8 +584,7 @@ static const struct ata_port_info nv_port_info[] = {
},
/* SWNCQ */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_NCQ,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index f03ad48..a004b1e 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -134,9 +134,7 @@ enum {
PDC_IRQ_DISABLE = (1 << 10),
PDC_RESET = (1 << 11), /* HDMA reset */
- PDC_COMMON_FLAGS = ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO |
- ATA_FLAG_PIO_POLLING,
+ PDC_COMMON_FLAGS = ATA_FLAG_PIO_POLLING,
/* ap->flags bits */
PDC_FLAG_GEN_II = (1 << 24),
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index daeebf1..c560326 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -155,8 +155,7 @@ static struct ata_port_operations qs_ata_ops = {
static const struct ata_port_info qs_port_info[] = {
/* board_2068_idx */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4_ONLY,
.udma_mask = ATA_UDMA6,
.port_ops = &qs_ata_ops,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3a4f842..b42edaa 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -61,8 +61,7 @@ enum {
SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
SIL_FLAG_MOD15WRITE = (1 << 30),
- SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO,
+ SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA,
/*
* Controller IDs
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index af41c6f..06c564e 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -244,8 +244,7 @@ enum {
BID_SIL3131 = 2,
/* host flags */
- SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
+ SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
ATA_FLAG_AN | ATA_FLAG_PMP,
SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index 2bfe3ae..cdcc13e 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -96,7 +96,7 @@ static struct ata_port_operations sis_ops = {
};
static const struct ata_port_info sis_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 7d9db4a..35eabcf 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -359,8 +359,7 @@ static struct ata_port_operations k2_sata_ops = {
static const struct ata_port_info k2_port_info[] = {
/* chip_svw4 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA,
+ .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
@@ -368,8 +367,7 @@ static const struct ata_port_info k2_port_info[] = {
},
/* chip_svw8 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA |
+ .flags = ATA_FLAG_SATA | K2_FLAG_NO_ATAPI_DMA |
K2_FLAG_SATA_8_PORTS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
@@ -378,8 +376,7 @@ static const struct ata_port_info k2_port_info[] = {
},
/* chip_svw42 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3,
+ .flags = ATA_FLAG_SATA | K2_FLAG_BAR_POS_3,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
@@ -387,8 +384,7 @@ static const struct ata_port_info k2_port_info[] = {
},
/* chip_svw43 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index bedd518..8fd3b72 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -273,9 +273,8 @@ static struct ata_port_operations pdc_20621_ops = {
static const struct ata_port_info pdc_port_info[] = {
/* board_20621 */
{
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_SRST | ATA_FLAG_MMIO |
- ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
+ ATA_FLAG_PIO_POLLING,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index b8578c3..235be71 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -88,8 +88,7 @@ static struct ata_port_operations uli_ops = {
};
static const struct ata_port_info uli_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_IGN_SIMPLEX,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_IGN_SIMPLEX,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &uli_ops,
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 8b677bb..21242c5 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -148,7 +148,7 @@ static struct ata_port_operations vt8251_ops = {
};
static const struct ata_port_info vt6420_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
@@ -156,7 +156,7 @@ static const struct ata_port_info vt6420_port_info = {
};
static struct ata_port_info vt6421_sport_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
@@ -164,7 +164,7 @@ static struct ata_port_info vt6421_sport_info = {
};
static struct ata_port_info vt6421_pport_info = {
- .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
/* No MWDMA */
.udma_mask = ATA_UDMA6,
@@ -172,8 +172,7 @@ static struct ata_port_info vt6421_pport_info = {
};
static struct ata_port_info vt8251_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS |
- ATA_FLAG_NO_LEGACY,
+ .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index e079cf2..7c98737 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -340,8 +340,7 @@ static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static const struct ata_port_info pi = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
- ATA_FLAG_MMIO,
+ .flags = ATA_FLAG_SATA,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 73fb1c4..25ef1a4 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc)
}
skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
- if (!skb && net_ratelimit()) {
- dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+ if (!skb) {
+ if (net_ratelimit())
+ dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
return -ENOMEM;
}
header = (void *)skb_put(skb, sizeof(*header));
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5f51c3b..4c5701c 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -1,6 +1,6 @@
# Makefile for the Linux device tree
-obj-y := core.o sys.o bus.o dd.o \
+obj-y := core.o sys.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index abe46ed..118c1b9 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,7 +1,6 @@
-obj-$(CONFIG_PM) += sysfs.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
-obj-$(CONFIG_PM_OPS) += generic_ops.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 8340497..052dc53 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -423,26 +423,22 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "EARLY ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
- if (error)
- goto End;
+ if (dev->pwr_domain) {
+ pm_dev_dbg(dev, state, "EARLY power domain ");
+ pm_noirq_op(dev, &dev->pwr_domain->ops, state);
}
if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "EARLY type ");
error = pm_noirq_op(dev, dev->type->pm, state);
- if (error)
- goto End;
- }
-
- if (dev->class && dev->class->pm) {
+ } else if (dev->class && dev->class->pm) {
pm_dev_dbg(dev, state, "EARLY class ");
error = pm_noirq_op(dev, dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ pm_dev_dbg(dev, state, "EARLY ");
+ error = pm_noirq_op(dev, dev->bus->pm, state);
}
-End:
TRACE_RESUME(error);
return error;
}
@@ -518,36 +514,39 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dev->power.in_suspend = false;
- if (dev->bus) {
- if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
- } else if (dev->bus->resume) {
- pm_dev_dbg(dev, state, "legacy ");
- error = legacy_resume(dev, dev->bus->resume);
- }
- if (error)
- goto End;
+ if (dev->pwr_domain) {
+ pm_dev_dbg(dev, state, "power domain ");
+ pm_op(dev, &dev->pwr_domain->ops, state);
}
- if (dev->type) {
- if (dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- }
- if (error)
- goto End;
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "type ");
+ error = pm_op(dev, dev->type->pm, state);
+ goto End;
}
if (dev->class) {
if (dev->class->pm) {
pm_dev_dbg(dev, state, "class ");
error = pm_op(dev, dev->class->pm, state);
+ goto End;
} else if (dev->class->resume) {
pm_dev_dbg(dev, state, "legacy class ");
error = legacy_resume(dev, dev->class->resume);
+ goto End;
}
}
+
+ if (dev->bus) {
+ if (dev->bus->pm) {
+ pm_dev_dbg(dev, state, "");
+ error = pm_op(dev, dev->bus->pm, state);
+ } else if (dev->bus->resume) {
+ pm_dev_dbg(dev, state, "legacy ");
+ error = legacy_resume(dev, dev->bus->resume);
+ }
+ }
+
End:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -629,19 +628,23 @@ static void device_complete(struct device *dev, pm_message_t state)
{
device_lock(dev);
- if (dev->class && dev->class->pm && dev->class->pm->complete) {
- pm_dev_dbg(dev, state, "completing class ");
- dev->class->pm->complete(dev);
+ if (dev->pwr_domain && dev->pwr_domain->ops.complete) {
+ pm_dev_dbg(dev, state, "completing power domain ");
+ dev->pwr_domain->ops.complete(dev);
}
- if (dev->type && dev->type->pm && dev->type->pm->complete) {
+ if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "completing type ");
- dev->type->pm->complete(dev);
- }
-
- if (dev->bus && dev->bus->pm && dev->bus->pm->complete) {
+ if (dev->type->pm->complete)
+ dev->type->pm->complete(dev);
+ } else if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "completing class ");
+ if (dev->class->pm->complete)
+ dev->class->pm->complete(dev);
+ } else if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "completing ");
- dev->bus->pm->complete(dev);
+ if (dev->bus->pm->complete)
+ dev->bus->pm->complete(dev);
}
device_unlock(dev);
@@ -669,7 +672,6 @@ static void dpm_complete(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
device_complete(dev, state);
- pm_runtime_put_sync(dev);
mutex_lock(&dpm_list_mtx);
put_device(dev);
@@ -727,29 +729,31 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
- int error = 0;
-
- if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "LATE class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
- if (error)
- goto End;
- }
+ int error;
if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "LATE type ");
error = pm_noirq_op(dev, dev->type->pm, state);
if (error)
- goto End;
- }
-
- if (dev->bus && dev->bus->pm) {
+ return error;
+ } else if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "LATE class ");
+ error = pm_noirq_op(dev, dev->class->pm, state);
+ if (error)
+ return error;
+ } else if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "LATE ");
error = pm_noirq_op(dev, dev->bus->pm, state);
+ if (error)
+ return error;
}
-End:
- return error;
+ if (dev->pwr_domain) {
+ pm_dev_dbg(dev, state, "LATE power domain ");
+ pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+ }
+
+ return 0;
}
/**
@@ -836,25 +840,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
goto End;
}
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "type ");
+ error = pm_op(dev, dev->type->pm, state);
+ goto Domain;
+ }
+
if (dev->class) {
if (dev->class->pm) {
pm_dev_dbg(dev, state, "class ");
error = pm_op(dev, dev->class->pm, state);
+ goto Domain;
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
error = legacy_suspend(dev, state, dev->class->suspend);
+ goto Domain;
}
- if (error)
- goto End;
- }
-
- if (dev->type) {
- if (dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- }
- if (error)
- goto End;
}
if (dev->bus) {
@@ -867,6 +868,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
}
+ Domain:
+ if (!error && dev->pwr_domain) {
+ pm_dev_dbg(dev, state, "power domain ");
+ pm_op(dev, &dev->pwr_domain->ops, state);
+ }
+
End:
device_unlock(dev);
complete_all(&dev->power.completion);
@@ -957,27 +964,34 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) {
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "preparing type ");
+ if (dev->type->pm->prepare)
+ error = dev->type->pm->prepare(dev);
+ suspend_report_result(dev->type->pm->prepare, error);
+ if (error)
+ goto End;
+ } else if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "preparing class ");
+ if (dev->class->pm->prepare)
+ error = dev->class->pm->prepare(dev);
+ suspend_report_result(dev->class->pm->prepare, error);
+ if (error)
+ goto End;
+ } else if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "preparing ");
- error = dev->bus->pm->prepare(dev);
+ if (dev->bus->pm->prepare)
+ error = dev->bus->pm->prepare(dev);
suspend_report_result(dev->bus->pm->prepare, error);
if (error)
goto End;
}
- if (dev->type && dev->type->pm && dev->type->pm->prepare) {
- pm_dev_dbg(dev, state, "preparing type ");
- error = dev->type->pm->prepare(dev);
- suspend_report_result(dev->type->pm->prepare, error);
- if (error)
- goto End;
+ if (dev->pwr_domain && dev->pwr_domain->ops.prepare) {
+ pm_dev_dbg(dev, state, "preparing power domain ");
+ dev->pwr_domain->ops.prepare(dev);
}
- if (dev->class && dev->class->pm && dev->class->pm->prepare) {
- pm_dev_dbg(dev, state, "preparing class ");
- error = dev->class->pm->prepare(dev);
- suspend_report_result(dev->class->pm->prepare, error);
- }
End:
device_unlock(dev);
@@ -1005,12 +1019,9 @@ static int dpm_prepare(pm_message_t state)
if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
pm_wakeup_event(dev, 0);
- if (pm_wakeup_pending()) {
- pm_runtime_put_sync(dev);
- error = -EBUSY;
- } else {
- error = device_prepare(dev, state);
- }
+ pm_runtime_put_sync(dev);
+ error = pm_wakeup_pending() ?
+ -EBUSY : device_prepare(dev, state);
mutex_lock(&dpm_list_mtx);
if (error) {
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 2bb9b4c..56a6899 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -222,7 +222,7 @@ int opp_get_opp_count(struct device *dev)
* opp_find_freq_exact() - search for an exact frequency
* @dev: device for which we do this operation
* @freq: frequency to search for
- * @is_available: true/false - match for available opp
+ * @available: true/false - match for available opp
*
* Searches for exact match in the opp list and returns pointer to the matching
* opp if found, else returns ERR_PTR in case of error and should be handled
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 698dde7..f2a25f1 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -58,19 +58,18 @@ static inline void device_pm_move_last(struct device *dev) {}
* sysfs.c
*/
-extern int dpm_sysfs_add(struct device *);
-extern void dpm_sysfs_remove(struct device *);
-extern void rpm_sysfs_remove(struct device *);
+extern int dpm_sysfs_add(struct device *dev);
+extern void dpm_sysfs_remove(struct device *dev);
+extern void rpm_sysfs_remove(struct device *dev);
+extern int wakeup_sysfs_add(struct device *dev);
+extern void wakeup_sysfs_remove(struct device *dev);
#else /* CONFIG_PM */
-static inline int dpm_sysfs_add(struct device *dev)
-{
- return 0;
-}
-
-static inline void dpm_sysfs_remove(struct device *dev)
-{
-}
+static inline int dpm_sysfs_add(struct device *dev) { return 0; }
+static inline void dpm_sysfs_remove(struct device *dev) {}
+static inline void rpm_sysfs_remove(struct device *dev) {}
+static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
+static inline void wakeup_sysfs_remove(struct device *dev) {}
#endif
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 42615b4..54597c8 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -168,6 +168,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int rpm_idle(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
+ int (*domain_callback)(struct device *);
int retval;
retval = rpm_check_suspend_allowed(dev);
@@ -213,19 +214,28 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle)
- callback = dev->bus->pm->runtime_idle;
- else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle)
+ if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_idle;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->runtime_idle;
+ else if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_idle;
else
callback = NULL;
- if (callback) {
+ if (dev->pwr_domain)
+ domain_callback = dev->pwr_domain->ops.runtime_idle;
+ else
+ domain_callback = NULL;
+
+ if (callback || domain_callback) {
spin_unlock_irq(&dev->power.lock);
- callback(dev);
+ if (domain_callback)
+ retval = domain_callback(dev);
+
+ if (!retval && callback)
+ callback(dev);
spin_lock_irq(&dev->power.lock);
}
@@ -372,12 +382,12 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
- callback = dev->bus->pm->runtime_suspend;
- else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend)
+ if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_suspend;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->runtime_suspend;
+ else if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_suspend;
else
callback = NULL;
@@ -390,6 +400,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
else
pm_runtime_cancel_pending(dev);
} else {
+ if (dev->pwr_domain)
+ rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev);
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -569,12 +581,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
- callback = dev->bus->pm->runtime_resume;
- else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume)
+ if (dev->pwr_domain)
+ rpm_callback(dev->pwr_domain->ops.runtime_resume, dev);
+
+ if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_resume;
else if (dev->class && dev->class->pm)
callback = dev->class->pm->runtime_resume;
+ else if (dev->bus && dev->bus->pm)
+ callback = dev->bus->pm->runtime_resume;
else
callback = NULL;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 0b1e46b..fff49be 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -431,26 +431,18 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(async, 0644, async_show, async_store);
#endif /* CONFIG_PM_ADVANCED_DEBUG */
-static struct attribute * power_attrs[] = {
- &dev_attr_wakeup.attr,
-#ifdef CONFIG_PM_SLEEP
- &dev_attr_wakeup_count.attr,
- &dev_attr_wakeup_active_count.attr,
- &dev_attr_wakeup_hit_count.attr,
- &dev_attr_wakeup_active.attr,
- &dev_attr_wakeup_total_time_ms.attr,
- &dev_attr_wakeup_max_time_ms.attr,
- &dev_attr_wakeup_last_time_ms.attr,
-#endif
+static struct attribute *power_attrs[] = {
#ifdef CONFIG_PM_ADVANCED_DEBUG
+#ifdef CONFIG_PM_SLEEP
&dev_attr_async.attr,
+#endif
#ifdef CONFIG_PM_RUNTIME
&dev_attr_runtime_status.attr,
&dev_attr_runtime_usage.attr,
&dev_attr_runtime_active_kids.attr,
&dev_attr_runtime_enabled.attr,
#endif
-#endif
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
NULL,
};
static struct attribute_group pm_attr_group = {
@@ -458,9 +450,26 @@ static struct attribute_group pm_attr_group = {
.attrs = power_attrs,
};
-#ifdef CONFIG_PM_RUNTIME
+static struct attribute *wakeup_attrs[] = {
+#ifdef CONFIG_PM_SLEEP
+ &dev_attr_wakeup.attr,
+ &dev_attr_wakeup_count.attr,
+ &dev_attr_wakeup_active_count.attr,
+ &dev_attr_wakeup_hit_count.attr,
+ &dev_attr_wakeup_active.attr,
+ &dev_attr_wakeup_total_time_ms.attr,
+ &dev_attr_wakeup_max_time_ms.attr,
+ &dev_attr_wakeup_last_time_ms.attr,
+#endif
+ NULL,
+};
+static struct attribute_group pm_wakeup_attr_group = {
+ .name = power_group_name,
+ .attrs = wakeup_attrs,
+};
static struct attribute *runtime_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
#ifndef CONFIG_PM_ADVANCED_DEBUG
&dev_attr_runtime_status.attr,
#endif
@@ -468,6 +477,7 @@ static struct attribute *runtime_attrs[] = {
&dev_attr_runtime_suspended_time.attr,
&dev_attr_runtime_active_time.attr,
&dev_attr_autosuspend_delay_ms.attr,
+#endif /* CONFIG_PM_RUNTIME */
NULL,
};
static struct attribute_group pm_runtime_attr_group = {
@@ -480,35 +490,49 @@ int dpm_sysfs_add(struct device *dev)
int rc;
rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
- if (rc == 0 && !dev->power.no_callbacks) {
+ if (rc)
+ return rc;
+
+ if (pm_runtime_callbacks_present(dev)) {
rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
if (rc)
- sysfs_remove_group(&dev->kobj, &pm_attr_group);
+ goto err_out;
+ }
+
+ if (device_can_wakeup(dev)) {
+ rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
+ if (rc) {
+ if (pm_runtime_callbacks_present(dev))
+ sysfs_unmerge_group(&dev->kobj,
+ &pm_runtime_attr_group);
+ goto err_out;
+ }
}
+ return 0;
+
+ err_out:
+ sysfs_remove_group(&dev->kobj, &pm_attr_group);
return rc;
}
-void rpm_sysfs_remove(struct device *dev)
+int wakeup_sysfs_add(struct device *dev)
{
- sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
+ return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-void dpm_sysfs_remove(struct device *dev)
+void wakeup_sysfs_remove(struct device *dev)
{
- rpm_sysfs_remove(dev);
- sysfs_remove_group(&dev->kobj, &pm_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
-#else /* CONFIG_PM_RUNTIME */
-
-int dpm_sysfs_add(struct device * dev)
+void rpm_sysfs_remove(struct device *dev)
{
- return sysfs_create_group(&dev->kobj, &pm_attr_group);
+ sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}
-void dpm_sysfs_remove(struct device * dev)
+void dpm_sysfs_remove(struct device *dev)
{
+ rpm_sysfs_remove(dev);
+ sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
sysfs_remove_group(&dev->kobj, &pm_attr_group);
}
-
-#endif
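
Editor's note on the sysfs.c rework above: the per-device power/ directory is now a base group plus two groups (runtime and wakeup) merged in only when they apply. A hedged sketch of the sysfs_merge_group()/sysfs_unmerge_group() mechanism it relies on follows; the attribute and the add/remove hooks are made up for illustration and are not part of this patch.

/* Sketch only: merging an extra attribute group into an existing sysfs
 * directory, as the PM core does above for the runtime and wakeup groups.
 * All "example_*" names are hypothetical. */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "42\n");
}
static DEVICE_ATTR(example, 0444, example_show, NULL);

static struct attribute *example_attrs[] = {
        &dev_attr_example.attr,
        NULL,
};

static const struct attribute_group example_group = {
        .name  = "power",       /* merge into the existing power/ directory */
        .attrs = example_attrs,
};

static int example_add(struct device *dev)
{
        /* Files appear inside power/ instead of a new directory;
         * undone later with sysfs_unmerge_group(). */
        return sysfs_merge_group(&dev->kobj, &example_group);
}

static void example_remove(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &example_group);
}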
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 9f4258d..c80e138 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
unsigned int val;
get_rtc_time(&time);
- printk("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n",
+ pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
val = time.tm_year; /* 100 years */
@@ -179,7 +179,7 @@ static int show_file_hash(unsigned int value)
unsigned int hash = hash_string(lineno, file, FILEHASH);
if (hash != value)
continue;
- printk(" hash matches %s:%u\n", file, lineno);
+ pr_info(" hash matches %s:%u\n", file, lineno);
match++;
}
return match;
@@ -255,7 +255,7 @@ static int late_resume_init(void)
val = val / FILEHASH;
dev = val /* % DEVHASH */;
- printk(" Magic number: %d:%d:%d\n", user, file, dev);
+ pr_info(" Magic number: %d:%d:%d\n", user, file, dev);
show_file_hash(file);
show_dev_hash(dev);
return 0;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 8ec406d..4573c83 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,12 +24,26 @@
*/
bool events_check_enabled;
-/* The counter of registered wakeup events. */
-static atomic_t event_count = ATOMIC_INIT(0);
-/* A preserved old value of event_count. */
+/*
+ * Combined counters of registered wakeup events and wakeup events in progress.
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+static atomic_t combined_event_count = ATOMIC_INIT(0);
+
+#define IN_PROGRESS_BITS (sizeof(int) * 4)
+#define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
+
+static void split_counters(unsigned int *cnt, unsigned int *inpr)
+{
+ unsigned int comb = atomic_read(&combined_event_count);
+
+ *cnt = (comb >> IN_PROGRESS_BITS);
+ *inpr = comb & MAX_IN_PROGRESS;
+}
+
+/* A preserved old value of the events counter. */
static unsigned int saved_count;
-/* The counter of wakeup events being processed. */
-static atomic_t events_in_progress = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(events_lock);
@@ -228,6 +242,35 @@ int device_wakeup_disable(struct device *dev)
EXPORT_SYMBOL_GPL(device_wakeup_disable);
/**
+ * device_set_wakeup_capable - Set/reset device wakeup capability flag.
+ * @dev: Device to handle.
+ * @capable: Whether or not @dev is capable of waking up the system from sleep.
+ *
+ * If @capable is set, set the @dev's power.can_wakeup flag and add its
+ * wakeup-related attributes to sysfs. Otherwise, unset the @dev's
+ * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
+ *
+ * This function may sleep and it can't be called from any context where
+ * sleeping is not allowed.
+ */
+void device_set_wakeup_capable(struct device *dev, bool capable)
+{
+ if (!!dev->power.can_wakeup == !!capable)
+ return;
+
+ if (device_is_registered(dev)) {
+ if (capable) {
+ if (wakeup_sysfs_add(dev))
+ return;
+ } else {
+ wakeup_sysfs_remove(dev);
+ }
+ }
+ dev->power.can_wakeup = capable;
+}
+EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
+
+/**
* device_init_wakeup - Device wakeup initialization.
* @dev: Device to handle.
* @enable: Whether or not to enable @dev as a wakeup device.
@@ -307,7 +350,8 @@ static void wakeup_source_activate(struct wakeup_source *ws)
ws->timer_expires = jiffies;
ws->last_time = ktime_get();
- atomic_inc(&events_in_progress);
+ /* Increment the counter of events in progress. */
+ atomic_inc(&combined_event_count);
}
/**
@@ -394,14 +438,10 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
del_timer(&ws->timer);
/*
- * event_count has to be incremented before events_in_progress is
- * modified, so that the callers of pm_check_wakeup_events() and
- * pm_save_wakeup_count() don't see the old value of event_count and
- * events_in_progress equal to zero at the same time.
+ * Increment the counter of registered wakeup events and decrement the
+ * counter of wakeup events in progress simultaneously.
*/
- atomic_inc(&event_count);
- smp_mb__before_atomic_dec();
- atomic_dec(&events_in_progress);
+ atomic_add(MAX_IN_PROGRESS, &combined_event_count);
}
/**
@@ -556,8 +596,10 @@ bool pm_wakeup_pending(void)
spin_lock_irqsave(&events_lock, flags);
if (events_check_enabled) {
- ret = ((unsigned int)atomic_read(&event_count) != saved_count)
- || atomic_read(&events_in_progress);
+ unsigned int cnt, inpr;
+
+ split_counters(&cnt, &inpr);
+ ret = (cnt != saved_count || inpr > 0);
events_check_enabled = !ret;
}
spin_unlock_irqrestore(&events_lock, flags);
@@ -573,25 +615,25 @@ bool pm_wakeup_pending(void)
* Store the number of registered wakeup events at the address in @count. Block
* if the current number of wakeup events being processed is nonzero.
*
- * Return false if the wait for the number of wakeup events being processed to
+ * Return 'false' if the wait for the number of wakeup events being processed to
* drop down to zero has been interrupted by a signal (and the current number
- * of wakeup events being processed is still nonzero). Otherwise return true.
+ * of wakeup events being processed is still nonzero). Otherwise return 'true'.
*/
bool pm_get_wakeup_count(unsigned int *count)
{
- bool ret;
-
- if (capable(CAP_SYS_ADMIN))
- events_check_enabled = false;
+ unsigned int cnt, inpr;
- while (atomic_read(&events_in_progress) && !signal_pending(current)) {
+ for (;;) {
+ split_counters(&cnt, &inpr);
+ if (inpr == 0 || signal_pending(current))
+ break;
pm_wakeup_update_hit_counts();
schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
}
- ret = !atomic_read(&events_in_progress);
- *count = atomic_read(&event_count);
- return ret;
+ split_counters(&cnt, &inpr);
+ *count = cnt;
+ return !inpr;
}
/**
@@ -600,24 +642,25 @@ bool pm_get_wakeup_count(unsigned int *count)
*
* If @count is equal to the current number of registered wakeup events and the
* current number of wakeup events being processed is zero, store @count as the
- * old number of registered wakeup events to be used by pm_check_wakeup_events()
- * and return true. Otherwise return false.
+ * old number of registered wakeup events for pm_check_wakeup_events(), enable
+ * wakeup events detection and return 'true'. Otherwise disable wakeup events
+ * detection and return 'false'.
*/
bool pm_save_wakeup_count(unsigned int count)
{
- bool ret = false;
+ unsigned int cnt, inpr;
+ events_check_enabled = false;
spin_lock_irq(&events_lock);
- if (count == (unsigned int)atomic_read(&event_count)
- && !atomic_read(&events_in_progress)) {
+ split_counters(&cnt, &inpr);
+ if (cnt == count && inpr == 0) {
saved_count = count;
events_check_enabled = true;
- ret = true;
}
spin_unlock_irq(&events_lock);
- if (!ret)
+ if (!events_check_enabled)
pm_wakeup_update_hit_counts();
- return ret;
+ return events_check_enabled;
}
static struct dentry *wakeup_sources_stats_dentry;
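
Editor's note on the combined counter above: the upper half of combined_event_count holds the number of registered wakeup events and the lower half the number of events in progress, so adding MAX_IN_PROGRESS bumps the event count and drops the in-progress count in one atomic operation. A standalone userspace demo of that packing arithmetic (assuming a 32-bit int, so IN_PROGRESS_BITS is 16 as in the patch):

/* Sketch only: models the combined_event_count packing used above. */
#include <assert.h>
#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)
#define MAX_IN_PROGRESS  ((1 << IN_PROGRESS_BITS) - 1)

static unsigned int comb;       /* stands in for the atomic counter */

static void split(unsigned int *cnt, unsigned int *inpr)
{
        *cnt  = comb >> IN_PROGRESS_BITS;
        *inpr = comb &  MAX_IN_PROGRESS;
}

int main(void)
{
        unsigned int cnt, inpr;

        comb += 1;               /* wakeup_source_activate(): one event in progress */
        comb += 1;               /* a second source activates */
        split(&cnt, &inpr);
        assert(cnt == 0 && inpr == 2);

        comb += MAX_IN_PROGRESS; /* wakeup_source_deactivate(): inpr-- and cnt++ at once */
        split(&cnt, &inpr);
        assert(cnt == 1 && inpr == 1);

        printf("registered=%u in_progress=%u\n", cnt, inpr);
        return 0;
}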
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
new file mode 100644
index 0000000..90af294
--- /dev/null
+++ b/drivers/base/syscore.c
@@ -0,0 +1,117 @@
+/*
+ * syscore.c - Execution of system core operations.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/syscore_ops.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+
+static LIST_HEAD(syscore_ops_list);
+static DEFINE_MUTEX(syscore_ops_lock);
+
+/**
+ * register_syscore_ops - Register a set of system core operations.
+ * @ops: System core operations to register.
+ */
+void register_syscore_ops(struct syscore_ops *ops)
+{
+ mutex_lock(&syscore_ops_lock);
+ list_add_tail(&ops->node, &syscore_ops_list);
+ mutex_unlock(&syscore_ops_lock);
+}
+EXPORT_SYMBOL_GPL(register_syscore_ops);
+
+/**
+ * unregister_syscore_ops - Unregister a set of system core operations.
+ * @ops: System core operations to unregister.
+ */
+void unregister_syscore_ops(struct syscore_ops *ops)
+{
+ mutex_lock(&syscore_ops_lock);
+ list_del(&ops->node);
+ mutex_unlock(&syscore_ops_lock);
+}
+EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * syscore_suspend - Execute all the registered system core suspend callbacks.
+ *
+ * This function is executed with one CPU on-line and interrupts disabled.
+ */
+int syscore_suspend(void)
+{
+ struct syscore_ops *ops;
+ int ret = 0;
+
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled before system core suspend.\n");
+
+ list_for_each_entry_reverse(ops, &syscore_ops_list, node)
+ if (ops->suspend) {
+ if (initcall_debug)
+ pr_info("PM: Calling %pF\n", ops->suspend);
+ ret = ops->suspend();
+ if (ret)
+ goto err_out;
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled after %pF\n", ops->suspend);
+ }
+
+ return 0;
+
+ err_out:
+ pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
+
+ list_for_each_entry_continue(ops, &syscore_ops_list, node)
+ if (ops->resume)
+ ops->resume();
+
+ return ret;
+}
+
+/**
+ * syscore_resume - Execute all the registered system core resume callbacks.
+ *
+ * This function is executed with one CPU on-line and interrupts disabled.
+ */
+void syscore_resume(void)
+{
+ struct syscore_ops *ops;
+
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled before system core resume.\n");
+
+ list_for_each_entry(ops, &syscore_ops_list, node)
+ if (ops->resume) {
+ if (initcall_debug)
+ pr_info("PM: Calling %pF\n", ops->resume);
+ ops->resume();
+ WARN_ONCE(!irqs_disabled(),
+ "Interrupts enabled after %pF\n", ops->resume);
+ }
+}
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * syscore_shutdown - Execute all the registered system core shutdown callbacks.
+ */
+void syscore_shutdown(void)
+{
+ struct syscore_ops *ops;
+
+ mutex_lock(&syscore_ops_lock);
+
+ list_for_each_entry_reverse(ops, &syscore_ops_list, node)
+ if (ops->shutdown) {
+ if (initcall_debug)
+ pr_info("PM: Calling %pF\n", ops->shutdown);
+ ops->shutdown();
+ }
+
+ mutex_unlock(&syscore_ops_lock);
+}
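
Editor's note on the new syscore.c above: it provides register/unregister helpers plus suspend/resume/shutdown walkers that run with interrupts disabled and one CPU online. A hedged sketch of how a low-level driver might hook into this API; the my_hw_* callbacks are placeholders, not code from this patch.

/* Sketch only: a hypothetical user of the new syscore API. */
#include <linux/syscore_ops.h>
#include <linux/init.h>

static int my_hw_suspend(void)
{
        /* Runs late, with one CPU online and interrupts disabled. */
        return 0;
}

static void my_hw_resume(void)
{
        /* Runs early in resume, under the same constraints. */
}

static void my_hw_shutdown(void)
{
        /* Runs on shutdown/reboot, in reverse registration order. */
}

static struct syscore_ops my_syscore_ops = {
        .suspend  = my_hw_suspend,
        .resume   = my_hw_resume,
        .shutdown = my_hw_shutdown,
};

static int __init my_hw_init(void)
{
        register_syscore_ops(&my_syscore_ops);
        return 0;
}
core_initcall(my_hw_init);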
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04f..77fc76f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
struct block_device *bdev = opened_bdev[cnt];
if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
continue;
- __invalidate_device(bdev);
+ __invalidate_device(bdev, true);
}
mutex_unlock(&open_lock);
} else {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 49e6a54..dbf31ec 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -78,7 +78,6 @@
#include <asm/uaccess.h>
-static DEFINE_MUTEX(loop_mutex);
static LIST_HEAD(loop_devices);
static DEFINE_MUTEX(loop_devices_mutex);
@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
{
struct loop_device *lo = bdev->bd_disk->private_data;
- mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex);
- mutex_unlock(&loop_mutex);
return 0;
}
@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
struct loop_device *lo = disk->private_data;
int err;
- mutex_lock(&loop_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (--lo->lo_refcnt)
@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
out:
mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
- mutex_unlock(&loop_mutex);
return 0;
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d7aa39e..9cb8668 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -120,6 +120,10 @@ static DEFINE_SPINLOCK(minor_lock);
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
+#define EMULATED_HD_DISK_MINOR_OFFSET (0)
+#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
+#define EMULATED_SD_DISK_MINOR_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET + (4 * 16))
+#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_HD_DISK_NAME_OFFSET + 4)
#define DEV_NAME "xvd" /* name in /dev */
@@ -281,7 +285,7 @@ static int blkif_queue_request(struct request *req)
info->shadow[id].request = req;
ring_req->id = id;
- ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
@@ -317,7 +321,7 @@ static int blkif_queue_request(struct request *req)
rq_data_dir(req) );
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
- ring_req->seg[i] =
+ ring_req->u.rw.seg[i] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
@@ -434,6 +438,65 @@ static void xlvbd_flush(struct blkfront_info *info)
info->feature_flush ? "enabled" : "disabled");
}
+static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
+{
+ int major;
+ major = BLKIF_MAJOR(vdevice);
+ *minor = BLKIF_MINOR(vdevice);
+ switch (major) {
+ case XEN_IDE0_MAJOR:
+ *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
+ *minor = ((*minor / 64) * PARTS_PER_DISK) +
+ EMULATED_HD_DISK_MINOR_OFFSET;
+ break;
+ case XEN_IDE1_MAJOR:
+ *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
+ *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
+ EMULATED_HD_DISK_MINOR_OFFSET;
+ break;
+ case XEN_SCSI_DISK0_MAJOR:
+ *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
+ *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
+ break;
+ case XEN_SCSI_DISK1_MAJOR:
+ case XEN_SCSI_DISK2_MAJOR:
+ case XEN_SCSI_DISK3_MAJOR:
+ case XEN_SCSI_DISK4_MAJOR:
+ case XEN_SCSI_DISK5_MAJOR:
+ case XEN_SCSI_DISK6_MAJOR:
+ case XEN_SCSI_DISK7_MAJOR:
+ *offset = (*minor / PARTS_PER_DISK) +
+ ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
+ EMULATED_SD_DISK_NAME_OFFSET;
+ *minor = *minor +
+ ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
+ EMULATED_SD_DISK_MINOR_OFFSET;
+ break;
+ case XEN_SCSI_DISK8_MAJOR:
+ case XEN_SCSI_DISK9_MAJOR:
+ case XEN_SCSI_DISK10_MAJOR:
+ case XEN_SCSI_DISK11_MAJOR:
+ case XEN_SCSI_DISK12_MAJOR:
+ case XEN_SCSI_DISK13_MAJOR:
+ case XEN_SCSI_DISK14_MAJOR:
+ case XEN_SCSI_DISK15_MAJOR:
+ *offset = (*minor / PARTS_PER_DISK) +
+ ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
+ EMULATED_SD_DISK_NAME_OFFSET;
+ *minor = *minor +
+ ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
+ EMULATED_SD_DISK_MINOR_OFFSET;
+ break;
+ case XENVBD_MAJOR:
+ *offset = *minor / PARTS_PER_DISK;
+ break;
+ default:
+ printk(KERN_WARNING "blkfront: your disk configuration is "
+ "incorrect, please use an xvd device instead\n");
+ return -ENODEV;
+ }
+ return 0;
+}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info,
@@ -441,7 +504,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
{
struct gendisk *gd;
int nr_minors = 1;
- int err = -ENODEV;
+ int err;
unsigned int offset;
int minor;
int nr_parts;
@@ -456,12 +519,20 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
}
if (!VDEV_IS_EXTENDED(info->vdevice)) {
- minor = BLKIF_MINOR(info->vdevice);
- nr_parts = PARTS_PER_DISK;
+ err = xen_translate_vdev(info->vdevice, &minor, &offset);
+ if (err)
+ return err;
+ nr_parts = PARTS_PER_DISK;
} else {
minor = BLKIF_MINOR_EXT(info->vdevice);
nr_parts = PARTS_PER_EXT_DISK;
+ offset = minor / nr_parts;
+ if (xen_hvm_domain() && offset <= EMULATED_HD_DISK_NAME_OFFSET + 4)
+ printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
+ "emulated IDE disks,\n\t choose an xvd device name"
+ "from xvde on\n", info->vdevice);
}
+ err = -ENODEV;
if ((minor % nr_parts) == 0)
nr_minors = nr_parts;
@@ -475,8 +546,6 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (gd == NULL)
goto release;
- offset = minor / nr_parts;
-
if (nr_minors > 1) {
if (offset < 26)
sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
@@ -615,7 +684,7 @@ static void blkif_completion(struct blk_shadow *s)
{
int i;
for (i = 0; i < s->req.nr_segments; i++)
- gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+ gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
@@ -932,7 +1001,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Rewrite any grant references invalidated by susp/resume. */
for (j = 0; j < req->nr_segments; j++)
gnttab_grant_foreign_access_ref(
- req->seg[j].gref,
+ req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->id].frame[j]),
rq_data_dir(info->shadow[req->id].request));
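
Editor's note on xen_translate_vdev() above: it maps legacy emulated IDE/SCSI device numbers into the xvd name and minor space. Below is a standalone arithmetic check of one case (the IDE0 slave disk, minor 64); the offset constants are copied from the patch, while PARTS_PER_DISK being 16 is an assumption about the rest of blkfront that is not shown in these hunks.

/* Sketch only: reproduces the XEN_IDE0_MAJOR branch for one worked case. */
#include <assert.h>
#include <stdio.h>

#define PARTS_PER_DISK                  16      /* assumed blkfront value */
#define EMULATED_HD_DISK_MINOR_OFFSET   0
#define EMULATED_HD_DISK_NAME_OFFSET    (EMULATED_HD_DISK_MINOR_OFFSET / 256)

int main(void)
{
        int minor = 64;          /* hdb on the emulated IDE0 channel */
        unsigned int offset;

        offset = (minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
        minor  = ((minor / 64) * PARTS_PER_DISK) + EMULATED_HD_DISK_MINOR_OFFSET;

        /* hdb becomes the second xvd disk: name offset 1 ("xvdb"),
         * starting at minor 16, leaving 16 minors for partitions. */
        assert(offset == 1 && minor == 16);
        printf("name offset %u, first minor %d\n", offset, minor);
        return 0;
}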
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a126e61..6dcd55a 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -39,6 +39,11 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3002) },
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03F0, 0x311D) },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xE02C) },
{ } /* Terminating entry */
};
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1da773f8..700a384 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,12 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros 3011 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ /* Atheros AR9285 Malbec with sflash firmware */
+ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
+
+ /* Atheros AR5BBU12 with sflash firmware */
+ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -826,7 +832,7 @@ static void btusb_work(struct work_struct *work)
if (hdev->conn_hash.sco_num > 0) {
if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
- err = usb_autopm_get_interface(data->isoc);
+ err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
if (err < 0) {
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -855,7 +861,7 @@ static void btusb_work(struct work_struct *work)
__set_isoc_interface(hdev, 0);
if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
- usb_autopm_put_interface(data->isoc);
+ usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
}
}
@@ -1038,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
- usb_enable_autosuspend(interface_to_usbdev(intf));
-
return 0;
}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9252e85..780498d 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
#else
printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
#endif
+ pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
}
/* First check that we have at least one AMD64 NB */
- if (!pci_dev_present(amd_nb_misc_ids))
+ if (!pci_dev_present(amd_nb_misc_ids)) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
+ }
/* Look for any AGP bridge */
agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
err = driver_attach(&agp_amd64_pci_driver.driver);
- if (err == 0 && agp_bridges_found == 0)
+ if (err == 0 && agp_bridges_found == 0) {
+ pci_unregister_driver(&agp_amd64_pci_driver);
err = -ENODEV;
+ }
}
return err;
}
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfe..5feebe2 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I915_IFPADDR 0x60
+#define I830_HIC 0x70
/* Intel 965G registers */
#define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d32..0d09b53 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
+#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
int num_dcache_entries;
- union {
- void __iomem *i9xx_flush_page;
- void *i8xx_flush_page;
- };
+ void __iomem *i9xx_flush_page;
char *i81x_gtt_table;
- struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
static void i830_cleanup(void)
{
- if (intel_private.i8xx_flush_page) {
- kunmap(intel_private.i8xx_flush_page);
- intel_private.i8xx_flush_page = NULL;
- }
-
- __free_page(intel_private.i8xx_page);
- intel_private.i8xx_page = NULL;
-}
-
-static void intel_i830_setup_flush(void)
-{
- /* return if we've already set the flush mechanism up */
- if (intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_page = alloc_page(GFP_KERNEL);
- if (!intel_private.i8xx_page)
- return;
-
- intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
- if (!intel_private.i8xx_flush_page)
- i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
*/
static void i830_chipset_flush(void)
{
- unsigned int *pg = intel_private.i8xx_flush_page;
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ /* Forcibly evict everything from the CPU write buffers.
+ * clflush appears to be insufficient.
+ */
+ wbinvd_on_all_cpus();
+
+ /* So far we have only seen documentation for this magic bit on 855GM;
+ * we hope it exists for the other gen2 chipsets...
+ *
+ * Also works as advertised on my 845G.
+ */
+ writel(readl(intel_private.registers+I830_HIC) | (1<<31),
+ intel_private.registers+I830_HIC);
- memset(pg, 0, 1024);
+ while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
+ if (time_after(jiffies, timeout))
+ break;
- if (cpu_has_clflush)
- clflush_cache_range(pg, 1024);
- else if (wbinvd_on_all_cpus() != 0)
- printk(KERN_ERR "Timed out waiting for cache flush.\n");
+ udelay(50);
+ }
}
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
- intel_i830_setup_flush();
-
return 0;
}
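
Editor's note on the rewritten i830_chipset_flush() above: the dedicated flush page is replaced by wbinvd plus a bounded poll on the I830_HIC bit. The poll-with-deadline idiom it uses is generic; a hedged kernel-style sketch follows, with the register pointer passed in rather than taken from intel_private, and reporting a timeout instead of silently giving up.

/* Sketch only: the bounded-poll idiom used by i830_chipset_flush(). */
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define BUSY_BIT (1u << 31)

static int wait_until_idle(void __iomem *reg)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);

        while (readl(reg) & BUSY_BIT) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;      /* the patch just breaks out */
                udelay(50);
        }
        return 0;
}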
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index d31483c..beecd1c 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -198,3 +198,15 @@ config HW_RANDOM_NOMADIK
module will be called nomadik-rng.
If unsure, say Y.
+
+config HW_RANDOM_PICOXCELL
+ tristate "Picochip picoXcell true random number generator support"
+ depends on HW_RANDOM && ARCH_PICOXCELL && PICOXCELL_PC3X3
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Picochip PC3x3 and later devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called picoxcell-rng.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 4273308..3db4eb8 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -19,3 +19,4 @@ obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
+obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 06aad08..2cc755a 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -91,7 +91,7 @@ static struct hwrng omap_rng_ops = {
static int __devinit omap_rng_probe(struct platform_device *pdev)
{
- struct resource *res, *mem;
+ struct resource *res;
int ret;
/*
@@ -116,14 +116,12 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
- mem = request_mem_region(res->start, resource_size(res),
- pdev->name);
- if (mem == NULL) {
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
ret = -EBUSY;
goto err_region;
}
- dev_set_drvdata(&pdev->dev, mem);
+ dev_set_drvdata(&pdev->dev, res);
rng_base = ioremap(res->start, resource_size(res));
if (!rng_base) {
ret = -ENOMEM;
@@ -146,7 +144,7 @@ err_register:
iounmap(rng_base);
rng_base = NULL;
err_ioremap:
- release_resource(mem);
+ release_mem_region(res->start, resource_size(res));
err_region:
if (cpu_is_omap24xx()) {
clk_disable(rng_ick);
@@ -157,7 +155,7 @@ err_region:
static int __exit omap_rng_remove(struct platform_device *pdev)
{
- struct resource *mem = dev_get_drvdata(&pdev->dev);
+ struct resource *res = dev_get_drvdata(&pdev->dev);
hwrng_unregister(&omap_rng_ops);
@@ -170,7 +168,7 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
clk_put(rng_ick);
}
- release_resource(mem);
+ release_mem_region(res->start, resource_size(res));
rng_base = NULL;
return 0;
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
new file mode 100644
index 0000000..990d55a
--- /dev/null
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * All enquiries to support@picochip.com
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define DATA_REG_OFFSET 0x0200
+#define CSR_REG_OFFSET 0x0278
+#define CSR_OUT_EMPTY_MASK (1 << 24)
+#define CSR_FAULT_MASK (1 << 1)
+#define TRNG_BLOCK_RESET_MASK (1 << 0)
+#define TAI_REG_OFFSET 0x0380
+
+/*
+ * The maximum amount of time in microseconds to spend waiting for data if the
+ * core wants us to wait. The TRNG should generate 32 bits every 320ns so a
+ * timeout of 20us seems reasonable. The TRNG does builtin tests of the data
+ * for randomness so we can't always assume there is data present.
+ */
+#define PICO_TRNG_TIMEOUT 20
+
+static void __iomem *rng_base;
+static struct clk *rng_clk;
+struct device *rng_dev;
+
+static inline u32 picoxcell_trng_read_csr(void)
+{
+ return __raw_readl(rng_base + CSR_REG_OFFSET);
+}
+
+static inline bool picoxcell_trng_is_empty(void)
+{
+ return picoxcell_trng_read_csr() & CSR_OUT_EMPTY_MASK;
+}
+
+/*
+ * Take the random number generator out of reset and make sure the interrupts
+ * are masked. We shouldn't need to get large amounts of random bytes so just
+ * poll the status register. The hardware generates 32 bits every 320ns so we
+ * shouldn't have to wait long enough to warrant waiting for an IRQ.
+ */
+static void picoxcell_trng_start(void)
+{
+ __raw_writel(0, rng_base + TAI_REG_OFFSET);
+ __raw_writel(0, rng_base + CSR_REG_OFFSET);
+}
+
+static void picoxcell_trng_reset(void)
+{
+ __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + CSR_REG_OFFSET);
+ __raw_writel(TRNG_BLOCK_RESET_MASK, rng_base + TAI_REG_OFFSET);
+ picoxcell_trng_start();
+}
+
+/*
+ * Get some random data from the random number generator. The hw_random core
+ * layer provides us with locking.
+ */
+static int picoxcell_trng_read(struct hwrng *rng, void *buf, size_t max,
+ bool wait)
+{
+ int i;
+
+ /* Wait for some data to become available. */
+ for (i = 0; i < PICO_TRNG_TIMEOUT && picoxcell_trng_is_empty(); ++i) {
+ if (!wait)
+ return 0;
+
+ udelay(1);
+ }
+
+ if (picoxcell_trng_read_csr() & CSR_FAULT_MASK) {
+ dev_err(rng_dev, "fault detected, resetting TRNG\n");
+ picoxcell_trng_reset();
+ return -EIO;
+ }
+
+ if (i == PICO_TRNG_TIMEOUT)
+ return 0;
+
+ *(u32 *)buf = __raw_readl(rng_base + DATA_REG_OFFSET);
+ return sizeof(u32);
+}
+
+static struct hwrng picoxcell_trng = {
+ .name = "picoxcell",
+ .read = picoxcell_trng_read,
+};
+
+static int picoxcell_trng_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!mem) {
+ dev_warn(&pdev->dev, "no memory resource\n");
+ return -ENOMEM;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ "picoxcell_trng")) {
+ dev_warn(&pdev->dev, "unable to request io mem\n");
+ return -EBUSY;
+ }
+
+ rng_base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!rng_base) {
+ dev_warn(&pdev->dev, "unable to remap io mem\n");
+ return -ENOMEM;
+ }
+
+ rng_clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rng_clk)) {
+ dev_warn(&pdev->dev, "no clk\n");
+ return PTR_ERR(rng_clk);
+ }
+
+ ret = clk_enable(rng_clk);
+ if (ret) {
+ dev_warn(&pdev->dev, "unable to enable clk\n");
+ goto err_enable;
+ }
+
+ picoxcell_trng_start();
+ ret = hwrng_register(&picoxcell_trng);
+ if (ret)
+ goto err_register;
+
+ rng_dev = &pdev->dev;
+ dev_info(&pdev->dev, "pixoxcell random number generator active\n");
+
+ return 0;
+
+err_register:
+ clk_disable(rng_clk);
+err_enable:
+ clk_put(rng_clk);
+
+ return ret;
+}
+
+static int __devexit picoxcell_trng_remove(struct platform_device *pdev)
+{
+ hwrng_unregister(&picoxcell_trng);
+ clk_disable(rng_clk);
+ clk_put(rng_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int picoxcell_trng_suspend(struct device *dev)
+{
+ clk_disable(rng_clk);
+
+ return 0;
+}
+
+static int picoxcell_trng_resume(struct device *dev)
+{
+ return clk_enable(rng_clk);
+}
+
+static const struct dev_pm_ops picoxcell_trng_pm_ops = {
+ .suspend = picoxcell_trng_suspend,
+ .resume = picoxcell_trng_resume,
+};
+#endif /* CONFIG_PM */
+
+static struct platform_driver picoxcell_trng_driver = {
+ .probe = picoxcell_trng_probe,
+ .remove = __devexit_p(picoxcell_trng_remove),
+ .driver = {
+ .name = "picoxcell-trng",
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &picoxcell_trng_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __init picoxcell_trng_init(void)
+{
+ return platform_driver_register(&picoxcell_trng_driver);
+}
+module_init(picoxcell_trng_init);
+
+static void __exit picoxcell_trng_exit(void)
+{
+ platform_driver_unregister(&picoxcell_trng_driver);
+}
+module_exit(picoxcell_trng_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("Picochip picoXcell TRNG driver");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 7855f9f..62787e3 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -900,6 +900,14 @@ static void sender(void *send_info,
printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
+ /*
+ * last_timeout_jiffies is updated here to avoid the
+ * smi_timeout() handler passing a very large time_diff
+ * value to smi_event_handler(), which causes
+ * the send command to abort.
+ */
+ smi_info->last_timeout_jiffies = jiffies;
+
mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
if (smi_info->thread)
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index e6d7562..33dc229 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -53,6 +53,8 @@ MODULE_LICENSE("GPL");
#define RTC_BITS 55 /* 55 bits for this implementation */
+static struct k_clock sgi_clock;
+
extern unsigned long sn_rtc_cycles_per_second;
#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))
@@ -487,7 +489,7 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
return 0;
};
-static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
+static int sgi_clock_set(const clockid_t clockid, const struct timespec *tp)
{
u64 nsec;
@@ -763,15 +765,21 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
return err;
}
+static int sgi_clock_getres(const clockid_t which_clock, struct timespec *tp)
+{
+ tp->tv_sec = 0;
+ tp->tv_nsec = sgi_clock_period;
+ return 0;
+}
+
static struct k_clock sgi_clock = {
- .res = 0,
- .clock_set = sgi_clock_set,
- .clock_get = sgi_clock_get,
- .timer_create = sgi_timer_create,
- .nsleep = do_posix_clock_nonanosleep,
- .timer_set = sgi_timer_set,
- .timer_del = sgi_timer_del,
- .timer_get = sgi_timer_get
+ .clock_set = sgi_clock_set,
+ .clock_get = sgi_clock_get,
+ .clock_getres = sgi_clock_getres,
+ .timer_create = sgi_timer_create,
+ .timer_set = sgi_timer_set,
+ .timer_del = sgi_timer_del,
+ .timer_get = sgi_timer_get
};
/**
@@ -831,8 +839,8 @@ static int __init mmtimer_init(void)
(unsigned long) node);
}
- sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
- register_posix_clock(CLOCK_SGI_CYCLE, &sgi_clock);
+ sgi_clock_period = NSEC_PER_SEC / sn_rtc_cycles_per_second;
+ posix_timers_register_clock(CLOCK_SGI_CYCLE, &sgi_clock);
printk(KERN_INFO "%s: v%s, %ld MHz\n", MMTIMER_DESC, MMTIMER_VERSION,
sn_rtc_cycles_per_second/(unsigned long)1E6);
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 777181a..bcbbc71 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
test_bit(IS_ANY_T1, &dev->flags))) {
DEBUGP(4, dev, "Perform AUTOPPS\n");
set_bit(IS_AUTOPPS_ACT, &dev->flags);
- ptsreq.protocol = ptsreq.protocol =
- (0x01 << dev->proto);
+ ptsreq.protocol = (0x01 << dev->proto);
ptsreq.flags = 0x01;
ptsreq.pts1 = 0x00;
ptsreq.pts2 = 0x00;
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 94b8eb4..444155a 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
{
struct ipw_dev *ipw = priv_data;
- struct resource *io_resource;
int ret;
p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
if (ret)
return ret;
- io_resource = request_region(p_dev->resource[0]->start,
- resource_size(p_dev->resource[0]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit;
+ }
p_dev->resource[2]->flags |=
WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
if (ret != 0)
- goto exit2;
+ goto exit1;
ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
- ipw->attr_memory = ioremap(p_dev->resource[2]->start,
+ ipw->common_memory = ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
- request_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit2;
+ }
p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
WIN_ENABLE;
p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
if (ret != 0)
- goto exit2;
+ goto exit3;
ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
if (ret != 0)
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
ipw->attr_memory = ioremap(p_dev->resource[3]->start,
resource_size(p_dev->resource[3]));
- request_mem_region(p_dev->resource[3]->start,
- resource_size(p_dev->resource[3]),
- IPWIRELESS_PCCARD_NAME);
+ if (!request_mem_region(p_dev->resource[3]->start,
+ resource_size(p_dev->resource[3]),
+ IPWIRELESS_PCCARD_NAME)) {
+ ret = -EBUSY;
+ goto exit4;
+ }
return 0;
+exit4:
+ iounmap(ipw->attr_memory);
exit3:
+ release_mem_region(p_dev->resource[2]->start,
+ resource_size(p_dev->resource[2]));
exit2:
- if (ipw->common_memory) {
- release_mem_region(p_dev->resource[2]->start,
- resource_size(p_dev->resource[2]));
- iounmap(ipw->common_memory);
- }
+ iounmap(ipw->common_memory);
exit1:
- release_resource(io_resource);
+ release_region(p_dev->resource[0]->start,
+ resource_size(p_dev->resource[0]));
+exit:
pcmcia_disable_device(p_dev);
- return -1;
+ return ret;
}
static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@ exit:
static void release_ipwireless(struct ipw_dev *ipw)
{
+ release_region(ipw->link->resource[0]->start,
+ resource_size(ipw->link->resource[0]));
if (ipw->common_memory) {
release_mem_region(ipw->link->resource[2]->start,
resource_size(ipw->link->resource[2]));
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 72a4fcb..5e29e80 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -128,6 +128,7 @@
* void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
* void add_interrupt_randomness(int irq);
+ * void add_disk_randomness(struct gendisk *disk);
*
* add_input_randomness() uses the input layer interrupt timing, as well as
* the event type information from the hardware.
@@ -136,9 +137,15 @@
* inputs to the entropy pool. Note that not all interrupts are good
* sources of randomness! For example, the timer interrupts is not a
* good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker. Disk interrupts are
- * a better measure, since the timing of the disk interrupts are more
- * unpredictable.
+ * regular, and hence predictable to an attacker. Network Interface
+ * Controller interrupts are a better measure, since the timing of the
+ * NIC interrupts is more unpredictable.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
*
* All of these routines try to estimate how many bits of randomness a
* particular randomness source. They do this by keeping track of the
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index faf5a2c..1f46f1c 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
- if (duration_idx != TPM_UNDEFINED) {
+ if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
- /* if duration is 0, it's because chip->vendor.duration wasn't */
- /* filled yet, so we set the lowest timeout just to give enough */
- /* time for tpm_get_timeouts() to succeed */
- return (duration <= 0 ? HZ : duration);
- } else
+ if (duration <= 0)
return 2 * 60 * HZ;
+ else
+ return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
@@ -577,11 +575,9 @@ duration:
if (rc)
return;
- if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
- be32_to_cpu(tpm_cmd.header.out.length)
- != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
+ if (be32_to_cpu(tpm_cmd.header.out.return_code)
+ != 3 * sizeof(u32))
return;
-
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
@@ -941,18 +937,6 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
-ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct tpm_chip *chip = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d %d %d\n",
- jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
- jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
- jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
-}
-EXPORT_SYMBOL_GPL(tpm_show_timeouts);
-
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index d84ff77..72ddb03 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,8 +56,6 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_temp_deactivated(struct device *,
struct device_attribute *attr, char *);
-extern ssize_t tpm_show_timeouts(struct device *,
- struct device_attribute *attr, char *);
struct tpm_chip;
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0d1d38e..dd21df5 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -376,7 +376,6 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
-static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static struct attribute *tis_attrs[] = {
&dev_attr_pubek.attr,
@@ -386,8 +385,7 @@ static struct attribute *tis_attrs[] = {
&dev_attr_owned.attr,
&dev_attr_temp_deactivated.attr,
&dev_attr_caps.attr,
- &dev_attr_cancel.attr,
- &dev_attr_timeouts.attr, NULL,
+ &dev_attr_cancel.attr, NULL,
};
static struct attribute_group tis_attr_grp = {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 4903931..84b164d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -388,6 +388,10 @@ static void discard_port_data(struct port *port)
unsigned int len;
int ret;
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
vq = port->in_vq;
if (port->inbuf)
buf = port->inbuf;
@@ -470,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port)
void *buf;
unsigned int len;
+ if (!port->portdev) {
+ /* Device has been unplugged. vqs are already gone. */
+ return;
+ }
while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
kfree(buf);
port->outvq_full = false;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1109f68..5cb4d09 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
ret = sysdev_driver_register(&cpu_sysdev_class,
&cpufreq_sysdev_driver);
+ if (ret)
+ goto err_null_driver;
- if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
+ if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
int i;
ret = -ENODEV;
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret) {
dprintk("no CPU initialized for driver %s\n",
driver_data->name);
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
-
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- cpufreq_driver = NULL;
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ goto err_sysdev_unreg;
}
}
- if (!ret) {
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- dprintk("driver %s up and running\n", driver_data->name);
- cpufreq_debug_enable_ratelimit();
- }
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ dprintk("driver %s up and running\n", driver_data->name);
+ cpufreq_debug_enable_ratelimit();
+ return 0;
+err_sysdev_unreg:
+ sysdev_driver_unregister(&cpu_sysdev_class,
+ &cpufreq_sysdev_driver);
+err_null_driver:
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ cpufreq_driver = NULL;
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
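
Editor's note on the cpufreq_register_driver() hunk above: the nested "if (!ret)" handling is converted into the usual goto-based unwind, so each failure path releases exactly what was set up before it. A generic, hedged sketch of that shape; the step functions are placeholders, not cpufreq APIs.

/* Sketch only: the goto-unwind shape the cpufreq hunk adopts. */
#include <errno.h>

static int step1(void)       { return 0; }       /* e.g. register with a subsystem */
static int step2(void)       { return -ENODEV; } /* e.g. probe for usable devices */
static void undo_step1(void) { }                 /* e.g. unregister again */

static int register_things(void)
{
        int ret;

        ret = step1();
        if (ret)
                return ret;

        ret = step2();
        if (ret)
                goto err_undo_step1;

        return 0;

err_undo_step1:
        undo_step1();
        return ret;
}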
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 526bfbf..94284c8 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -81,8 +81,6 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
*/
static DEFINE_MUTEX(dbs_mutex);
-static struct workqueue_struct *kconservative_wq;
-
static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
@@ -560,7 +558,7 @@ static void do_dbs_timer(struct work_struct *work)
dbs_check_cpu(dbs_info);
- queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
+ schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
}
@@ -572,8 +570,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
dbs_info->enable = 1;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
- queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
- delay);
+ schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -716,25 +713,12 @@ struct cpufreq_governor cpufreq_gov_conservative = {
static int __init cpufreq_gov_dbs_init(void)
{
- int err;
-
- kconservative_wq = create_workqueue("kconservative");
- if (!kconservative_wq) {
- printk(KERN_ERR "Creation of kconservative failed\n");
- return -EFAULT;
- }
-
- err = cpufreq_register_governor(&cpufreq_gov_conservative);
- if (err)
- destroy_workqueue(kconservative_wq);
-
- return err;
+ return cpufreq_register_governor(&cpufreq_gov_conservative);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_conservative);
- destroy_workqueue(kconservative_wq);
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c631f27..58aa85e 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -104,8 +104,6 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
*/
static DEFINE_MUTEX(dbs_mutex);
-static struct workqueue_struct *kondemand_wq;
-
static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int up_threshold;
@@ -667,7 +665,7 @@ static void do_dbs_timer(struct work_struct *work)
__cpufreq_driver_target(dbs_info->cur_policy,
dbs_info->freq_lo, CPUFREQ_RELATION_H);
}
- queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+ schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
}
@@ -681,8 +679,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
dbs_info->sample_type = DBS_NORMAL_SAMPLE;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
- queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
- delay);
+ schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -814,7 +811,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
static int __init cpufreq_gov_dbs_init(void)
{
- int err;
cputime64_t wall;
u64 idle_time;
int cpu = get_cpu();
@@ -838,22 +834,12 @@ static int __init cpufreq_gov_dbs_init(void)
MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
}
- kondemand_wq = create_workqueue("kondemand");
- if (!kondemand_wq) {
- printk(KERN_ERR "Creation of kondemand failed\n");
- return -EFAULT;
- }
- err = cpufreq_register_governor(&cpufreq_gov_ondemand);
- if (err)
- destroy_workqueue(kondemand_wq);
-
- return err;
+ return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
- destroy_workqueue(kondemand_wq);
}
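
Editor's note on the governor hunks above: both governors drop their private workqueues and queue the deferrable delayed work on the shared system workqueue instead. A hedged module-style sketch of that pattern; the names and the 100 ms period are made up.

/* Sketch only: deferrable delayed work on the system workqueue. */
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct delayed_work sample_work;

static void sample_fn(struct work_struct *work)
{
        /* ... do the periodic sampling ..., then re-arm. */
        schedule_delayed_work(&sample_work, msecs_to_jiffies(100));
}

static int __init sample_init(void)
{
        /* Deferrable: the timer does not wake an idle CPU just for this. */
        INIT_DELAYED_WORK_DEFERRABLE(&sample_work, sample_fn);
        schedule_delayed_work(&sample_work, msecs_to_jiffies(100));
        return 0;
}

static void __exit sample_exit(void)
{
        cancel_delayed_work_sync(&sample_work);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");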
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index eab2cf7..e541852 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -252,4 +252,21 @@ config CRYPTO_DEV_OMAP_AES
OMAP processors have AES module accelerator. Select this if you
want to use the OMAP module for AES algorithms.
+config CRYPTO_DEV_PICOXCELL
+ tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
+ depends on ARCH_PICOXCELL
+ select CRYPTO_AES
+ select CRYPTO_AUTHENC
+ select CRYPTO_ALGAPI
+ select CRYPTO_DES
+ select CRYPTO_CBC
+ select CRYPTO_ECB
+ select CRYPTO_SEQIV
+ help
+ This option enables support for the hardware offload engines in the
+ Picochip picoXcell SoC devices. Select this for IPSEC ESP offload
+ and for 3gpp Layer 2 ciphering support.
+
+ Saying m here will build a module named picoxcell_crypto.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 2566973..5203e34 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
-
+obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index add2a1a..5b970d9e 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -839,9 +839,9 @@ static int omap_aes_probe(struct platform_device *pdev)
/* Initializing the clock */
dd->iclk = clk_get(dev, "ick");
- if (!dd->iclk) {
+ if (IS_ERR(dd->iclk)) {
dev_err(dev, "clock intialization failed.\n");
- err = -ENODEV;
+ err = PTR_ERR(dd->iclk);
goto err_res;
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 2e71123..465cde3 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1206,9 +1206,9 @@ static int __devinit omap_sham_probe(struct platform_device *pdev)
/* Initializing the clock */
dd->iclk = clk_get(dev, "ick");
- if (!dd->iclk) {
+ if (IS_ERR(dd->iclk)) {
dev_err(dev, "clock intialization failed.\n");
- err = -ENODEV;
+ err = PTR_ERR(dd->iclk);
goto clk_err;
}
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
new file mode 100644
index 0000000..b092d0a
--- /dev/null
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -0,0 +1,1867 @@
+/*
+ * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/rtnetlink.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "picoxcell_crypto_regs.h"
+
+/*
+ * The threshold for the number of entries in the CMD FIFO available before
+ * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
+ * number of interrupts raised to the CPU.
+ */
+#define CMD0_IRQ_THRESHOLD 1
+
+/*
+ * The timeout period (in jiffies) for a PDU. When the number of PDUs in
+ * flight is greater than the STAT_IRQ_THRESHOLD, or is 0, the timer is disabled.
+ * When there are packets in flight but lower than the threshold, we enable
+ * the timer and at expiry, attempt to remove any processed packets from the
+ * queue and if there are still packets left, schedule the timer again.
+ */
+#define PACKET_TIMEOUT 1
+
+/* The priority to register each algorithm with. */
+#define SPACC_CRYPTO_ALG_PRIORITY 10000
+
+#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN 16
+#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
+#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ 64
+#define SPACC_CRYPTO_IPSEC_MAX_CTXS 32
+#define SPACC_CRYPTO_IPSEC_FIFO_SZ 32
+#define SPACC_CRYPTO_L2_CIPHER_PG_SZ 64
+#define SPACC_CRYPTO_L2_HASH_PG_SZ 64
+#define SPACC_CRYPTO_L2_MAX_CTXS 128
+#define SPACC_CRYPTO_L2_FIFO_SZ 128
+
+#define MAX_DDT_LEN 16
+
+/* DDT format. This must match the hardware DDT format exactly. */
+struct spacc_ddt {
+ dma_addr_t p;
+ u32 len;
+};
+
+/*
+ * Asynchronous crypto request structure.
+ *
+ * This structure defines a request that is either queued for processing or
+ * being processed.
+ */
+struct spacc_req {
+ struct list_head list;
+ struct spacc_engine *engine;
+ struct crypto_async_request *req;
+ int result;
+ bool is_encrypt;
+ unsigned ctx_id;
+ dma_addr_t src_addr, dst_addr;
+ struct spacc_ddt *src_ddt, *dst_ddt;
+ void (*complete)(struct spacc_req *req);
+
+ /* AEAD specific bits. */
+ u8 *giv;
+ size_t giv_len;
+ dma_addr_t giv_pa;
+};
+
+struct spacc_engine {
+ void __iomem *regs;
+ struct list_head pending;
+ int next_ctx;
+ spinlock_t hw_lock;
+ int in_flight;
+ struct list_head completed;
+ struct list_head in_progress;
+ struct tasklet_struct complete;
+ unsigned long fifo_sz;
+ void __iomem *cipher_ctx_base;
+ void __iomem *hash_key_base;
+ struct spacc_alg *algs;
+ unsigned num_algs;
+ struct list_head registered_algs;
+ size_t cipher_pg_sz;
+ size_t hash_pg_sz;
+ const char *name;
+ struct clk *clk;
+ struct device *dev;
+ unsigned max_ctxs;
+ struct timer_list packet_timeout;
+ unsigned stat_irq_thresh;
+ struct dma_pool *req_pool;
+};
+
+/* Algorithm type mask. */
+#define SPACC_CRYPTO_ALG_MASK 0x7
+
+/* SPACC definition of a crypto algorithm. */
+struct spacc_alg {
+ unsigned long ctrl_default;
+ unsigned long type;
+ struct crypto_alg alg;
+ struct spacc_engine *engine;
+ struct list_head entry;
+ int key_offs;
+ int iv_offs;
+};
+
+/* Generic context structure for any algorithm type. */
+struct spacc_generic_ctx {
+ struct spacc_engine *engine;
+ int flags;
+ int key_offs;
+ int iv_offs;
+};
+
+/* Block cipher context. */
+struct spacc_ablk_ctx {
+ struct spacc_generic_ctx generic;
+ u8 key[AES_MAX_KEY_SIZE];
+ u8 key_len;
+ /*
+ * The fallback cipher. If the operation can't be done in hardware,
+ * fallback to a software version.
+ */
+ struct crypto_ablkcipher *sw_cipher;
+};
+
+/* AEAD cipher context. */
+struct spacc_aead_ctx {
+ struct spacc_generic_ctx generic;
+ u8 cipher_key[AES_MAX_KEY_SIZE];
+ u8 hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
+ u8 cipher_key_len;
+ u8 hash_key_len;
+ struct crypto_aead *sw_cipher;
+ size_t auth_size;
+ u8 salt[AES_BLOCK_SIZE];
+};
+
+static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
+{
+ return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
+}
+
+static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
+{
+ u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
+
+ return fifo_stat & SPA_FIFO_CMD_FULL;
+}
+
+/*
+ * Given a cipher context, and a context number, get the base address of the
+ * context page.
+ *
+ * Returns the address of the context page where the key/context may
+ * be written.
+ */
+static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
+ unsigned indx,
+ bool is_cipher_ctx)
+{
+ return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
+ (indx * ctx->engine->cipher_pg_sz) :
+ ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
+}
+
+/* The context pages can only be written with 32-bit accesses. */
+static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
+ unsigned count)
+{
+ const u32 *src32 = (const u32 *) src;
+
+ while (count--)
+ writel(*src32++, dst++);
+}
+
+static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
+ void __iomem *page_addr, const u8 *key,
+ size_t key_len, const u8 *iv, size_t iv_len)
+{
+ void __iomem *key_ptr = page_addr + ctx->key_offs;
+ void __iomem *iv_ptr = page_addr + ctx->iv_offs;
+
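+ /* Key and IV lengths are in bytes; convert to 32-bit word counts. */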
+ memcpy_toio32(key_ptr, key, key_len / 4);
+ memcpy_toio32(iv_ptr, iv, iv_len / 4);
+}
+
+/*
+ * Load a context into the engine's context memory.
+ *
+ * Returns the index of the context page where the context was loaded.
+ */
+static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
+ const u8 *ciph_key, size_t ciph_len,
+ const u8 *iv, size_t ivlen, const u8 *hash_key,
+ size_t hash_len)
+{
+ unsigned indx = ctx->engine->next_ctx++;
+ void __iomem *ciph_page_addr, *hash_page_addr;
+
+ ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
+ hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);
+
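+ /* Context pages are handed out round-robin, wrapping at the FIFO depth. */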
+ ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
+ spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
+ ivlen);
+ writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
+ (1 << SPA_KEY_SZ_CIPHER_OFFSET),
+ ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
+
+ if (hash_key) {
+ memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
+ writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
+ ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
+ }
+
+ return indx;
+}
+
+/* Count the number of scatterlist entries in a scatterlist. */
+static int sg_count(struct scatterlist *sg_list, int nbytes)
+{
+ struct scatterlist *sg = sg_list;
+ int sg_nents = 0;
+
+ while (nbytes > 0) {
+ ++sg_nents;
+ nbytes -= sg->length;
+ sg = sg_next(sg);
+ }
+
+ return sg_nents;
+}
+
+static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
+{
+ ddt->p = phys;
+ ddt->len = len;
+}
+
+/*
+ * Take a crypto request and scatterlists for the data and turn them into DDTs
+ * for passing to the crypto engines. This also DMA maps the data so that the
+ * crypto engines can DMA to/from them.
+ */
+static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
+ struct scatterlist *payload,
+ unsigned nbytes,
+ enum dma_data_direction dir,
+ dma_addr_t *ddt_phys)
+{
+ unsigned nents, mapped_ents;
+ struct scatterlist *cur;
+ struct spacc_ddt *ddt;
+ int i;
+
+ nents = sg_count(payload, nbytes);
+ mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);
+
+ if (mapped_ents + 1 > MAX_DDT_LEN)
+ goto out;
+
+ ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
+ if (!ddt)
+ goto out;
+
+ for_each_sg(payload, cur, mapped_ents, i)
+ ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
+ ddt_set(&ddt[mapped_ents], 0, 0);
+
+ return ddt;
+
+out:
+ dma_unmap_sg(engine->dev, payload, nents, dir);
+ return NULL;
+}
+
+static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
+{
+ struct aead_request *areq = container_of(req->req, struct aead_request,
+ base);
+ struct spacc_engine *engine = req->engine;
+ struct spacc_ddt *src_ddt, *dst_ddt;
+ unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
+ unsigned nents = sg_count(areq->src, areq->cryptlen);
+ dma_addr_t iv_addr;
+ struct scatterlist *cur;
+ int i, dst_ents, src_ents, assoc_ents;
+ u8 *iv = giv ? giv : areq->iv;
+
+ src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
+ if (!src_ddt)
+ return -ENOMEM;
+
+ dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
+ if (!dst_ddt) {
+ dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
+ return -ENOMEM;
+ }
+
+ req->src_ddt = src_ddt;
+ req->dst_ddt = dst_ddt;
+
+ assoc_ents = dma_map_sg(engine->dev, areq->assoc,
+ sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
+ if (areq->src != areq->dst) {
+ src_ents = dma_map_sg(engine->dev, areq->src, nents,
+ DMA_TO_DEVICE);
+ dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
+ DMA_FROM_DEVICE);
+ } else {
+ src_ents = dma_map_sg(engine->dev, areq->src, nents,
+ DMA_BIDIRECTIONAL);
+ dst_ents = 0;
+ }
+
+ /*
+ * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
+ * formed by the crypto block and sent as the ESP IV for IPSEC.
+ */
+ iv_addr = dma_map_single(engine->dev, iv, ivsize,
+ giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+ req->giv_pa = iv_addr;
+
+ /*
+ * Map the associated data. For decryption we don't copy the
+ * associated data.
+ */
+ for_each_sg(areq->assoc, cur, assoc_ents, i) {
+ ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
+ if (req->is_encrypt)
+ ddt_set(dst_ddt++, sg_dma_address(cur),
+ sg_dma_len(cur));
+ }
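+ /*
+ * The IV follows the associated data in the source DDT so that it can
+ * be counted as part of the AAD when we are not generating it (see
+ * spacc_aead_submit()).
+ */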
+ ddt_set(src_ddt++, iv_addr, ivsize);
+
+ if (giv || req->is_encrypt)
+ ddt_set(dst_ddt++, iv_addr, ivsize);
+
+ /*
+ * Now map in the payload for the source and destination and terminate
+ * with the NULL pointers.
+ */
+ for_each_sg(areq->src, cur, src_ents, i) {
+ ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
+ if (areq->src == areq->dst)
+ ddt_set(dst_ddt++, sg_dma_address(cur),
+ sg_dma_len(cur));
+ }
+
+ for_each_sg(areq->dst, cur, dst_ents, i)
+ ddt_set(dst_ddt++, sg_dma_address(cur),
+ sg_dma_len(cur));
+
+ ddt_set(src_ddt, 0, 0);
+ ddt_set(dst_ddt, 0, 0);
+
+ return 0;
+}
+
+static void spacc_aead_free_ddts(struct spacc_req *req)
+{
+ struct aead_request *areq = container_of(req->req, struct aead_request,
+ base);
+ struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
+ struct spacc_aead_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
+ struct spacc_engine *engine = aead_ctx->generic.engine;
+ unsigned ivsize = alg->alg.cra_aead.ivsize;
+ unsigned nents = sg_count(areq->src, areq->cryptlen);
+
+ if (areq->src != areq->dst) {
+ dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
+ dma_unmap_sg(engine->dev, areq->dst,
+ sg_count(areq->dst, areq->cryptlen),
+ DMA_FROM_DEVICE);
+ } else
+ dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
+
+ dma_unmap_sg(engine->dev, areq->assoc,
+ sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
+
+ dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
+
+ dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
+ dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
+}
+
+static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
+ dma_addr_t ddt_addr, struct scatterlist *payload,
+ unsigned nbytes, enum dma_data_direction dir)
+{
+ unsigned nents = sg_count(payload, nbytes);
+
+ dma_unmap_sg(req->engine->dev, payload, nents, dir);
+ dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
+}
+
+/*
+ * Set key for a DES operation in an AEAD cipher. This also performs weak key
+ * checking if required.
+ */
+static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ if (unlikely(!des_ekey(tmp, key)) &&
+ (crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->cipher_key, key, len);
+ ctx->cipher_key_len = len;
+
+ return 0;
+}
+
+/* Set the key for the AES block cipher component of the AEAD transform. */
+static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ /*
+ * IPSec engine only supports 128 and 256 bit AES keys. If we get a
+ * request for any other size (192 bits) then we need to do a software
+ * fallback.
+ */
+ if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
+ /*
+ * Set the fallback transform to use the same request flags as
+ * the hardware transform.
+ */
+ ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->sw_cipher->base.crt_flags |=
+ tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
+ return crypto_aead_setkey(ctx->sw_cipher, key, len);
+ }
+
+ memcpy(ctx->cipher_key, key, len);
+ ctx->cipher_key_len = len;
+
+ return 0;
+}
+
+static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
+ struct rtattr *rta = (void *)key;
+ struct crypto_authenc_key_param *param;
+ unsigned int authkeylen, enckeylen;
+ int err = -EINVAL;
+
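+ /*
+ * The authenc() key blob is an rtattr giving the encryption key length,
+ * followed by the authentication key and then the encryption key, so
+ * unpack it in that order.
+ */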
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ if (keylen < enckeylen)
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+
+ if (enckeylen > AES_MAX_KEY_SIZE)
+ goto badkey;
+
+ if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
+ SPA_CTRL_CIPH_ALG_AES)
+ err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
+ else
+ err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);
+
+ if (err)
+ goto badkey;
+
+ memcpy(ctx->hash_ctx, key, authkeylen);
+ ctx->hash_key_len = authkeylen;
+
+ return 0;
+
+badkey:
+ crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int spacc_aead_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+{
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
+
+ ctx->auth_size = authsize;
+
+ return 0;
+}
+
+/*
+ * Check if an AEAD request requires a fallback operation. Some requests can't
+ * be completed in hardware because the hardware may not support certain key
+ * sizes. In these cases we need to complete the request in software.
+ */
+static int spacc_aead_need_fallback(struct spacc_req *req)
+{
+ struct aead_request *aead_req;
+ struct crypto_tfm *tfm = req->req->tfm;
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ aead_req = container_of(req->req, struct aead_request, base);
+ /*
+ * If we have an unsupported key length, then we need to do a
+ * software fallback.
+ */
+ if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
+ SPA_CTRL_CIPH_ALG_AES &&
+ ctx->cipher_key_len != AES_KEYSIZE_128 &&
+ ctx->cipher_key_len != AES_KEYSIZE_256)
+ return 1;
+
+ return 0;
+}
+
+static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
+ bool is_encrypt)
+{
+ struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
+ int err;
+
+ if (ctx->sw_cipher) {
+ /*
+ * Change the request to use the software fallback transform,
+ * and once the ciphering has completed, put the old transform
+ * back into the request.
+ */
+ aead_request_set_tfm(req, ctx->sw_cipher);
+ err = is_encrypt ? crypto_aead_encrypt(req) :
+ crypto_aead_decrypt(req);
+ aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
+ } else
+ err = -EINVAL;
+
+ return err;
+}
+
+static void spacc_aead_complete(struct spacc_req *req)
+{
+ spacc_aead_free_ddts(req);
+ req->req->complete(req->req, req->result);
+}
+
+static int spacc_aead_submit(struct spacc_req *req)
+{
+ struct crypto_tfm *tfm = req->req->tfm;
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_engine *engine = ctx->generic.engine;
+ u32 ctrl, proc_len, assoc_len;
+ struct aead_request *aead_req =
+ container_of(req->req, struct aead_request, base);
+
+ req->result = -EINPROGRESS;
+ req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
+ ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
+ ctx->hash_ctx, ctx->hash_key_len);
+
+ /* Set the source and destination DDT pointers. */
+ writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
+ writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
+ writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
+
+ assoc_len = aead_req->assoclen;
+ proc_len = aead_req->cryptlen + assoc_len;
+
+ /*
+ * If we aren't generating an IV, then we need to include the IV in the
+ * associated data so that it is included in the hash.
+ */
+ if (!req->giv) {
+ assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
+ proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
+ } else
+ proc_len += req->giv_len;
+
+ /*
+ * If we are decrypting, we need to take the length of the ICV out of
+ * the processing length.
+ */
+ if (!req->is_encrypt)
+ proc_len -= ctx->auth_size;
+
+ writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
+ writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
+ writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
+ writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
+ writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
+
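+ /* Build the control word: context index, direction and ICV handling. */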
+ ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
+ (1 << SPA_CTRL_ICV_APPEND);
+ if (req->is_encrypt)
+ ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
+ else
+ ctrl |= (1 << SPA_CTRL_KEY_EXP);
+
+ mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
+ writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
+
+ return -EINPROGRESS;
+}
+
+/*
+ * Setup an AEAD request for processing. This will configure the engine, load
+ * the context and then start the packet processing.
+ *
+ * @giv Pointer to destination address for a generated IV. If the
+ * request does not need to generate an IV then this should be set to NULL.
+ */
+static int spacc_aead_setup(struct aead_request *req, u8 *giv,
+ unsigned alg_type, bool is_encrypt)
+{
+ struct crypto_alg *alg = req->base.tfm->__crt_alg;
+ struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+ struct spacc_req *dev_req = aead_request_ctx(req);
+ int err = -EINPROGRESS;
+ unsigned long flags;
+ unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
+
+ dev_req->giv = giv;
+ dev_req->giv_len = ivsize;
+ dev_req->req = &req->base;
+ dev_req->is_encrypt = is_encrypt;
+ dev_req->result = -EBUSY;
+ dev_req->engine = engine;
+ dev_req->complete = spacc_aead_complete;
+
+ if (unlikely(spacc_aead_need_fallback(dev_req)))
+ return spacc_aead_do_fallback(req, alg_type, is_encrypt);
+
+ err = spacc_aead_make_ddts(dev_req, dev_req->giv);
+ if (err)
+ goto out;
+
+ err = -EINPROGRESS;
+ spin_lock_irqsave(&engine->hw_lock, flags);
+ if (unlikely(spacc_fifo_cmd_full(engine))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -EBUSY;
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+ goto out_free_ddts;
+ }
+ list_add_tail(&dev_req->list, &engine->pending);
+ } else {
+ ++engine->in_flight;
+ list_add_tail(&dev_req->list, &engine->in_progress);
+ spacc_aead_submit(dev_req);
+ }
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+
+ goto out;
+
+out_free_ddts:
+ spacc_aead_free_ddts(dev_req);
+out:
+ return err;
+}
+
+static int spacc_aead_encrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+ return spacc_aead_setup(req, NULL, alg->type, 1);
+}
+
+static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
+{
+ struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
+ struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+ size_t ivsize = crypto_aead_ivsize(tfm);
+ struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
+ unsigned len;
+ __be64 seq;
+
+ memcpy(req->areq.iv, ctx->salt, ivsize);
+ len = ivsize;
+ if (ivsize > sizeof(u64)) {
+ memset(req->giv, 0, ivsize - sizeof(u64));
+ len = sizeof(u64);
+ }
+ seq = cpu_to_be64(req->seq);
+ memcpy(req->giv + ivsize - len, &seq, len);
+
+ return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
+}
+
+static int spacc_aead_decrypt(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+ struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+ return spacc_aead_setup(req, NULL, alg->type, 0);
+}
+
+/*
+ * Initialise a new AEAD context. This is responsible for allocating the
+ * fallback cipher and initialising the context.
+ */
+static int spacc_aead_cra_init(struct crypto_tfm *tfm)
+{
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_engine *engine = spacc_alg->engine;
+
+ ctx->generic.flags = spacc_alg->type;
+ ctx->generic.engine = engine;
+ ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->sw_cipher)) {
+ dev_warn(engine->dev, "failed to allocate fallback for %s\n",
+ alg->cra_name);
+ ctx->sw_cipher = NULL;
+ }
+ ctx->generic.key_offs = spacc_alg->key_offs;
+ ctx->generic.iv_offs = spacc_alg->iv_offs;
+
+ get_random_bytes(ctx->salt, sizeof(ctx->salt));
+
+ tfm->crt_aead.reqsize = sizeof(struct spacc_req);
+
+ return 0;
+}
+
+/*
+ * Destructor for an AEAD context. This is called when the transform is freed
+ * and must free the fallback cipher.
+ */
+static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
+{
+ struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->sw_cipher)
+ crypto_free_aead(ctx->sw_cipher);
+ ctx->sw_cipher = NULL;
+}
+
+/*
+ * Set the DES key for a block cipher transform. This also performs weak key
+ * checking if the transform has requested it.
+ */
+static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ u32 tmp[DES_EXPKEY_WORDS];
+
+ if (len > DES3_EDE_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (unlikely(!des_ekey(tmp, key)) &&
+ (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ return -EINVAL;
+ }
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+ return 0;
+}
+
+/*
+ * Set the key for an AES block cipher. Some key lengths are not supported in
+ * hardware so this must also check whether a fallback is needed.
+ */
+static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
+ unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ int err = 0;
+
+ if (len > AES_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /*
+ * IPSec engine only supports 128 and 256 bit AES keys. If we get a
+ * request for any other size (192 bits) then we need to do a software
+ * fallback.
+ */
+ if ((len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) &&
+ ctx->sw_cipher) {
+ /*
+ * Set the fallback transform to use the same request flags as
+ * the hardware transform.
+ */
+ ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ ctx->sw_cipher->base.crt_flags |=
+ cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK;
+
+ err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
+ if (err)
+ goto sw_setkey_failed;
+ } else if ((len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) &&
+ !ctx->sw_cipher)
+ err = -EINVAL;
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+sw_setkey_failed:
+ if (err && ctx->sw_cipher) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+ tfm->crt_flags |=
+ ctx->sw_cipher->base.crt_flags & CRYPTO_TFM_RES_MASK;
+ }
+
+ return err;
+}
+
+static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int len)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ int err = 0;
+
+ if (len > AES_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ err = -EINVAL;
+ goto out;
+ }
+
+ memcpy(ctx->key, key, len);
+ ctx->key_len = len;
+
+out:
+ return err;
+}
+
+static int spacc_ablk_need_fallback(struct spacc_req *req)
+{
+ struct spacc_ablk_ctx *ctx;
+ struct crypto_tfm *tfm = req->req->tfm;
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+
+ ctx = crypto_tfm_ctx(tfm);
+
+ return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
+ SPA_CTRL_CIPH_ALG_AES &&
+ ctx->key_len != AES_KEYSIZE_128 &&
+ ctx->key_len != AES_KEYSIZE_256;
+}
+
+static void spacc_ablk_complete(struct spacc_req *req)
+{
+ struct ablkcipher_request *ablk_req =
+ container_of(req->req, struct ablkcipher_request, base);
+
+ if (ablk_req->src != ablk_req->dst) {
+ spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
+ ablk_req->nbytes, DMA_TO_DEVICE);
+ spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
+ ablk_req->nbytes, DMA_FROM_DEVICE);
+ } else
+ spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
+ ablk_req->nbytes, DMA_BIDIRECTIONAL);
+
+ req->req->complete(req->req, req->result);
+}
+
+static int spacc_ablk_submit(struct spacc_req *req)
+{
+ struct crypto_tfm *tfm = req->req->tfm;
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_engine *engine = ctx->generic.engine;
+ u32 ctrl;
+
+ req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
+ ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
+ NULL, 0);
+
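+ /* Set the source and destination DDT pointers. */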
+ writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
+ writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
+ writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);
+
+ writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
+ writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
+ writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
+ writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);
+
+ ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
+ (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
+ (1 << SPA_CTRL_KEY_EXP));
+
+ mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
+ writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);
+
+ return -EINPROGRESS;
+}
+
+static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
+ unsigned alg_type, bool is_encrypt)
+{
+ struct crypto_tfm *old_tfm =
+ crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
+ int err;
+
+ if (!ctx->sw_cipher)
+ return -EINVAL;
+
+ /*
+ * Change the request to use the software fallback transform, and once
+ * the ciphering has completed, put the old transform back into the
+ * request.
+ */
+ ablkcipher_request_set_tfm(req, ctx->sw_cipher);
+ err = is_encrypt ? crypto_ablkcipher_encrypt(req) :
+ crypto_ablkcipher_decrypt(req);
+ ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm));
+
+ return err;
+}
+
+static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
+ bool is_encrypt)
+{
+ struct crypto_alg *alg = req->base.tfm->__crt_alg;
+ struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+ struct spacc_req *dev_req = ablkcipher_request_ctx(req);
+ unsigned long flags;
+ int err = -ENOMEM;
+
+ dev_req->req = &req->base;
+ dev_req->is_encrypt = is_encrypt;
+ dev_req->engine = engine;
+ dev_req->complete = spacc_ablk_complete;
+ dev_req->result = -EINPROGRESS;
+
+ if (unlikely(spacc_ablk_need_fallback(dev_req)))
+ return spacc_ablk_do_fallback(req, alg_type, is_encrypt);
+
+ /*
+ * Create the DDTs for the engine. If we share the same source and
+ * destination then we can optimize by reusing the DDTs.
+ */
+ if (req->src != req->dst) {
+ dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
+ req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
+ if (!dev_req->src_ddt)
+ goto out;
+
+ dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
+ req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
+ if (!dev_req->dst_ddt)
+ goto out_free_src;
+ } else {
+ dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
+ req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
+ if (!dev_req->dst_ddt)
+ goto out;
+
+ dev_req->src_ddt = NULL;
+ dev_req->src_addr = dev_req->dst_addr;
+ }
+
+ err = -EINPROGRESS;
+ spin_lock_irqsave(&engine->hw_lock, flags);
+ /*
+ * Check if the engine will accept the operation now. If it won't then
+ * we either stick it on the end of a pending list if we can backlog,
+ * or bail out with an error if not.
+ */
+ if (unlikely(spacc_fifo_cmd_full(engine))) {
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -EBUSY;
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+ goto out_free_ddts;
+ }
+ list_add_tail(&dev_req->list, &engine->pending);
+ } else {
+ ++engine->in_flight;
+ list_add_tail(&dev_req->list, &engine->in_progress);
+ spacc_ablk_submit(dev_req);
+ }
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+
+ goto out;
+
+out_free_ddts:
+ spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
+ req->nbytes, req->src == req->dst ?
+ DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
+out_free_src:
+ if (req->src != req->dst)
+ spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
+ req->src, req->nbytes, DMA_TO_DEVICE);
+out:
+ return err;
+}
+
+static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
+{
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+ struct spacc_engine *engine = spacc_alg->engine;
+
+ ctx->generic.flags = spacc_alg->type;
+ ctx->generic.engine = engine;
+ if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+ ctx->sw_cipher = crypto_alloc_ablkcipher(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->sw_cipher)) {
+ dev_warn(engine->dev, "failed to allocate fallback for %s\n",
+ alg->cra_name);
+ ctx->sw_cipher = NULL;
+ }
+ }
+ ctx->generic.key_offs = spacc_alg->key_offs;
+ ctx->generic.iv_offs = spacc_alg->iv_offs;
+
+ tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);
+
+ return 0;
+}
+
+static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
+{
+ struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->sw_cipher)
+ crypto_free_ablkcipher(ctx->sw_cipher);
+ ctx->sw_cipher = NULL;
+}
+
+static int spacc_ablk_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+ return spacc_ablk_setup(req, alg->type, 1);
+}
+
+static int spacc_ablk_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+
+ return spacc_ablk_setup(req, alg->type, 0);
+}
+
+static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
+{
+ return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
+ SPA_FIFO_STAT_EMPTY;
+}
+
+static void spacc_process_done(struct spacc_engine *engine)
+{
+ struct spacc_req *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->hw_lock, flags);
+
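+ /* Drain the status FIFO; completions are finished later in the tasklet. */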
+ while (!spacc_fifo_stat_empty(engine)) {
+ req = list_first_entry(&engine->in_progress, struct spacc_req,
+ list);
+ list_move_tail(&req->list, &engine->completed);
+
+ /* POP the status register. */
+ writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
+ req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
+ SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;
+
+ /*
+ * Convert the SPAcc error status into the standard POSIX error
+ * codes.
+ */
+ if (unlikely(req->result)) {
+ switch (req->result) {
+ case SPA_STATUS_ICV_FAIL:
+ req->result = -EBADMSG;
+ break;
+
+ case SPA_STATUS_MEMORY_ERROR:
+ dev_warn(engine->dev,
+ "memory error triggered\n");
+ req->result = -EFAULT;
+ break;
+
+ case SPA_STATUS_BLOCK_ERROR:
+ dev_warn(engine->dev,
+ "block error triggered\n");
+ req->result = -EIO;
+ break;
+ }
+ }
+ }
+
+ tasklet_schedule(&engine->complete);
+
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+}
+
+static irqreturn_t spacc_spacc_irq(int irq, void *dev)
+{
+ struct spacc_engine *engine = (struct spacc_engine *)dev;
+ u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);
+
+ writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
+ spacc_process_done(engine);
+
+ return IRQ_HANDLED;
+}
+
+static void spacc_packet_timeout(unsigned long data)
+{
+ struct spacc_engine *engine = (struct spacc_engine *)data;
+
+ spacc_process_done(engine);
+}
+
+static int spacc_req_submit(struct spacc_req *req)
+{
+ struct crypto_alg *alg = req->req->tfm->__crt_alg;
+
+ if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
+ return spacc_aead_submit(req);
+ else
+ return spacc_ablk_submit(req);
+}
+
+static void spacc_spacc_complete(unsigned long data)
+{
+ struct spacc_engine *engine = (struct spacc_engine *)data;
+ struct spacc_req *req, *tmp;
+ unsigned long flags;
+ int num_removed = 0;
+ LIST_HEAD(completed);
+
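+ /*
+ * Take the completed requests off the engine under the lock, then
+ * complete them without holding it.
+ */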
+ spin_lock_irqsave(&engine->hw_lock, flags);
+ list_splice_init(&engine->completed, &completed);
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+
+ list_for_each_entry_safe(req, tmp, &completed, list) {
+ ++num_removed;
+ req->complete(req);
+ }
+
+ /* Try and fill the engine back up again. */
+ spin_lock_irqsave(&engine->hw_lock, flags);
+
+ engine->in_flight -= num_removed;
+
+ list_for_each_entry_safe(req, tmp, &engine->pending, list) {
+ if (spacc_fifo_cmd_full(engine))
+ break;
+
+ list_move_tail(&req->list, &engine->in_progress);
+ ++engine->in_flight;
+ req->result = spacc_req_submit(req);
+ }
+
+ if (engine->in_flight)
+ mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);
+
+ spin_unlock_irqrestore(&engine->hw_lock, flags);
+}
+
+#ifdef CONFIG_PM
+static int spacc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spacc_engine *engine = platform_get_drvdata(pdev);
+
+ /*
+ * We only support standby mode. All we have to do is gate the clock to
+ * the spacc. The hardware will preserve state until we turn it back
+ * on again.
+ */
+ clk_disable(engine->clk);
+
+ return 0;
+}
+
+static int spacc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spacc_engine *engine = platform_get_drvdata(pdev);
+
+ return clk_enable(engine->clk);
+}
+
+static const struct dev_pm_ops spacc_pm_ops = {
+ .suspend = spacc_suspend,
+ .resume = spacc_resume,
+};
+#endif /* CONFIG_PM */
+
+static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
+{
+ return dev ? platform_get_drvdata(to_platform_device(dev)) : NULL;
+}
+
+static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct spacc_engine *engine = spacc_dev_to_engine(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
+}
+
+static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct spacc_engine *engine = spacc_dev_to_engine(dev);
+ unsigned long thresh;
+
+ if (strict_strtoul(buf, 0, &thresh))
+ return -EINVAL;
+
+ thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
+
+ engine->stat_irq_thresh = thresh;
+ writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
+ engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
+
+ return len;
+}
+static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
+ spacc_stat_irq_thresh_store);
+
+static struct spacc_alg ipsec_engine_algs[] = {
+ {
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .alg = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
+ .alg = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_aes_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
+ .alg = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
+ .alg = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ecb-des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
+ .alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-ede-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
+ .alg = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-ede-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_des_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+ {
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .alg = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA256 |
+ SPA_CTRL_HASH_MODE_HMAC,
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .alg = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .key_offs = 0,
+ .iv_offs = AES_MAX_KEY_SIZE,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+ .alg = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+ .alg = {
+ .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_SHA256 |
+ SPA_CTRL_HASH_MODE_HMAC,
+ .alg = {
+ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+ {
+ .key_offs = DES_BLOCK_SIZE,
+ .iv_offs = 0,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
+ SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+ .alg = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct spacc_aead_ctx),
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_aead = {
+ .setkey = spacc_aead_setkey,
+ .setauthsize = spacc_aead_setauthsize,
+ .encrypt = spacc_aead_encrypt,
+ .decrypt = spacc_aead_decrypt,
+ .givencrypt = spacc_aead_givencrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .cra_init = spacc_aead_cra_init,
+ .cra_exit = spacc_aead_cra_exit,
+ },
+ },
+};
+
+static struct spacc_alg l2_engine_algs[] = {
+ {
+ .key_offs = 0,
+ .iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
+ .ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
+ SPA_CTRL_CIPH_MODE_F8,
+ .alg = {
+ .cra_name = "f8(kasumi)",
+ .cra_driver_name = "f8-kasumi-picoxcell",
+ .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_ablkcipher = {
+ .setkey = spacc_kasumi_f8_setkey,
+ .encrypt = spacc_ablk_encrypt,
+ .decrypt = spacc_ablk_decrypt,
+ .min_keysize = 16,
+ .max_keysize = 16,
+ .ivsize = 8,
+ },
+ .cra_init = spacc_ablk_cra_init,
+ .cra_exit = spacc_ablk_cra_exit,
+ },
+ },
+};
+
+static int __devinit spacc_probe(struct platform_device *pdev,
+ unsigned max_ctxs, size_t cipher_pg_sz,
+ size_t hash_pg_sz, size_t fifo_sz,
+ struct spacc_alg *algs, size_t num_algs)
+{
+ int i, err, ret = -EINVAL;
+ struct resource *mem, *irq;
+ struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
+ GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
+
+ engine->max_ctxs = max_ctxs;
+ engine->cipher_pg_sz = cipher_pg_sz;
+ engine->hash_pg_sz = hash_pg_sz;
+ engine->fifo_sz = fifo_sz;
+ engine->algs = algs;
+ engine->num_algs = num_algs;
+ engine->name = dev_name(&pdev->dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mem || !irq) {
+ dev_err(&pdev->dev, "no memory/irq resource for engine\n");
+ return -ENXIO;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ engine->name))
+ return -ENOMEM;
+
+ engine->regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!engine->regs) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ return -ENOMEM;
+ }
+
+ if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
+ engine->name, engine)) {
+ dev_err(engine->dev, "failed to request IRQ\n");
+ return -EBUSY;
+ }
+
+ engine->dev = &pdev->dev;
+ engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
+ engine->hash_key_base = engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;
+
+ engine->req_pool = dmam_pool_create(engine->name, engine->dev,
+ MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
+ if (!engine->req_pool)
+ return -ENOMEM;
+
+ spin_lock_init(&engine->hw_lock);
+
+ engine->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(engine->clk)) {
+ dev_info(&pdev->dev, "clk unavailable\n");
+ return PTR_ERR(engine->clk);
+ }
+
+ if (clk_enable(engine->clk)) {
+ dev_info(&pdev->dev, "unable to enable clk\n");
+ clk_put(engine->clk);
+ return -EIO;
+ }
+
+ err = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+ if (err) {
+ clk_disable(engine->clk);
+ clk_put(engine->clk);
+ return err;
+ }
+
+ /*
+ * Use an IRQ threshold of 50% as a default. This seems to be a
+ * reasonable trade-off between latency and throughput, but can be
+ * changed at runtime.
+ */
+ engine->stat_irq_thresh = (engine->fifo_sz / 2);
+
+ /*
+ * Configure the interrupts. We only use the STAT_CNT interrupt as we
+ * only submit a new packet for processing when we complete another in
+ * the queue. This minimizes time spent in the interrupt handler.
+ */
+ writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
+ engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
+ writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
+ engine->regs + SPA_IRQ_EN_REG_OFFSET);
+
+ setup_timer(&engine->packet_timeout, spacc_packet_timeout,
+ (unsigned long)engine);
+
+ INIT_LIST_HEAD(&engine->pending);
+ INIT_LIST_HEAD(&engine->completed);
+ INIT_LIST_HEAD(&engine->in_progress);
+ engine->in_flight = 0;
+ tasklet_init(&engine->complete, spacc_spacc_complete,
+ (unsigned long)engine);
+
+ platform_set_drvdata(pdev, engine);
+
+ INIT_LIST_HEAD(&engine->registered_algs);
+ for (i = 0; i < engine->num_algs; ++i) {
+ engine->algs[i].engine = engine;
+ err = crypto_register_alg(&engine->algs[i].alg);
+ if (!err) {
+ list_add_tail(&engine->algs[i].entry,
+ &engine->registered_algs);
+ ret = 0;
+ }
+ if (err)
+ dev_err(engine->dev, "failed to register alg \"%s\"\n",
+ engine->algs[i].alg.cra_name);
+ else
+ dev_dbg(engine->dev, "registered alg \"%s\"\n",
+ engine->algs[i].alg.cra_name);
+ }
+
+ return ret;
+}
+
+static int __devexit spacc_remove(struct platform_device *pdev)
+{
+ struct spacc_alg *alg, *next;
+ struct spacc_engine *engine = platform_get_drvdata(pdev);
+
+ del_timer_sync(&engine->packet_timeout);
+ device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
+
+ list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
+ list_del(&alg->entry);
+ crypto_unregister_alg(&alg->alg);
+ }
+
+ clk_disable(engine->clk);
+ clk_put(engine->clk);
+
+ return 0;
+}
+
+static int __devinit ipsec_probe(struct platform_device *pdev)
+{
+ return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS,
+ SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,
+ SPACC_CRYPTO_IPSEC_HASH_PG_SZ,
+ SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,
+ ARRAY_SIZE(ipsec_engine_algs));
+}
+
+static struct platform_driver ipsec_driver = {
+ .probe = ipsec_probe,
+ .remove = __devexit_p(spacc_remove),
+ .driver = {
+ .name = "picoxcell-ipsec",
+#ifdef CONFIG_PM
+ .pm = &spacc_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __devinit l2_probe(struct platform_device *pdev)
+{
+ return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,
+ SPACC_CRYPTO_L2_CIPHER_PG_SZ,
+ SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,
+ l2_engine_algs, ARRAY_SIZE(l2_engine_algs));
+}
+
+static struct platform_driver l2_driver = {
+ .probe = l2_probe,
+ .remove = __devexit_p(spacc_remove),
+ .driver = {
+ .name = "picoxcell-l2",
+#ifdef CONFIG_PM
+ .pm = &spacc_pm_ops,
+#endif /* CONFIG_PM */
+ },
+};
+
+static int __init spacc_init(void)
+{
+ int ret = platform_driver_register(&ipsec_driver);
+ if (ret) {
+ pr_err("failed to register ipsec spacc driver");
+ goto out;
+ }
+
+ ret = platform_driver_register(&l2_driver);
+ if (ret) {
+ pr_err("failed to register l2 spacc driver");
+ goto l2_failed;
+ }
+
+ return 0;
+
+l2_failed:
+ platform_driver_unregister(&ipsec_driver);
+out:
+ return ret;
+}
+module_init(spacc_init);
+
+static void __exit spacc_exit(void)
+{
+ platform_driver_unregister(&ipsec_driver);
+ platform_driver_unregister(&l2_driver);
+}
+module_exit(spacc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/crypto/picoxcell_crypto_regs.h b/drivers/crypto/picoxcell_crypto_regs.h
new file mode 100644
index 0000000..af93442
--- /dev/null
+++ b/drivers/crypto/picoxcell_crypto_regs.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2010 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#ifndef __PICOXCELL_CRYPTO_REGS_H__
+#define __PICOXCELL_CRYPTO_REGS_H__
+
+#define SPA_STATUS_OK 0
+#define SPA_STATUS_ICV_FAIL 1
+#define SPA_STATUS_MEMORY_ERROR 2
+#define SPA_STATUS_BLOCK_ERROR 3
+
+#define SPA_IRQ_CTRL_STAT_CNT_OFFSET 16
+#define SPA_IRQ_STAT_STAT_MASK (1 << 4)
+#define SPA_FIFO_STAT_STAT_OFFSET 16
+#define SPA_FIFO_STAT_STAT_CNT_MASK (0x3F << SPA_FIFO_STAT_STAT_OFFSET)
+#define SPA_STATUS_RES_CODE_OFFSET 24
+#define SPA_STATUS_RES_CODE_MASK (0x3 << SPA_STATUS_RES_CODE_OFFSET)
+#define SPA_KEY_SZ_CTX_INDEX_OFFSET 8
+#define SPA_KEY_SZ_CIPHER_OFFSET 31
+
+#define SPA_IRQ_EN_REG_OFFSET 0x00000000
+#define SPA_IRQ_STAT_REG_OFFSET 0x00000004
+#define SPA_IRQ_CTRL_REG_OFFSET 0x00000008
+#define SPA_FIFO_STAT_REG_OFFSET 0x0000000C
+#define SPA_SDMA_BRST_SZ_REG_OFFSET 0x00000010
+#define SPA_SRC_PTR_REG_OFFSET 0x00000020
+#define SPA_DST_PTR_REG_OFFSET 0x00000024
+#define SPA_OFFSET_REG_OFFSET 0x00000028
+#define SPA_AAD_LEN_REG_OFFSET 0x0000002C
+#define SPA_PROC_LEN_REG_OFFSET 0x00000030
+#define SPA_ICV_LEN_REG_OFFSET 0x00000034
+#define SPA_ICV_OFFSET_REG_OFFSET 0x00000038
+#define SPA_SW_CTRL_REG_OFFSET 0x0000003C
+#define SPA_CTRL_REG_OFFSET 0x00000040
+#define SPA_AUX_INFO_REG_OFFSET 0x0000004C
+#define SPA_STAT_POP_REG_OFFSET 0x00000050
+#define SPA_STATUS_REG_OFFSET 0x00000054
+#define SPA_KEY_SZ_REG_OFFSET 0x00000100
+#define SPA_CIPH_KEY_BASE_REG_OFFSET 0x00004000
+#define SPA_HASH_KEY_BASE_REG_OFFSET 0x00008000
+#define SPA_RC4_CTX_BASE_REG_OFFSET 0x00020000
+
+#define SPA_IRQ_EN_REG_RESET 0x00000000
+#define SPA_IRQ_CTRL_REG_RESET 0x00000000
+#define SPA_FIFO_STAT_REG_RESET 0x00000000
+#define SPA_SDMA_BRST_SZ_REG_RESET 0x00000000
+#define SPA_SRC_PTR_REG_RESET 0x00000000
+#define SPA_DST_PTR_REG_RESET 0x00000000
+#define SPA_OFFSET_REG_RESET 0x00000000
+#define SPA_AAD_LEN_REG_RESET 0x00000000
+#define SPA_PROC_LEN_REG_RESET 0x00000000
+#define SPA_ICV_LEN_REG_RESET 0x00000000
+#define SPA_ICV_OFFSET_REG_RESET 0x00000000
+#define SPA_SW_CTRL_REG_RESET 0x00000000
+#define SPA_CTRL_REG_RESET 0x00000000
+#define SPA_AUX_INFO_REG_RESET 0x00000000
+#define SPA_STAT_POP_REG_RESET 0x00000000
+#define SPA_STATUS_REG_RESET 0x00000000
+#define SPA_KEY_SZ_REG_RESET 0x00000000
+
+#define SPA_CTRL_HASH_ALG_IDX 4
+#define SPA_CTRL_CIPH_MODE_IDX 8
+#define SPA_CTRL_HASH_MODE_IDX 12
+#define SPA_CTRL_CTX_IDX 16
+#define SPA_CTRL_ENCRYPT_IDX 24
+#define SPA_CTRL_AAD_COPY 25
+#define SPA_CTRL_ICV_PT 26
+#define SPA_CTRL_ICV_ENC 27
+#define SPA_CTRL_ICV_APPEND 28
+#define SPA_CTRL_KEY_EXP 29
+
+#define SPA_KEY_SZ_CXT_IDX 8
+#define SPA_KEY_SZ_CIPHER_IDX 31
+
+#define SPA_IRQ_EN_CMD0_EN (1 << 0)
+#define SPA_IRQ_EN_STAT_EN (1 << 4)
+#define SPA_IRQ_EN_GLBL_EN (1 << 31)
+
+#define SPA_CTRL_CIPH_ALG_NULL 0x00
+#define SPA_CTRL_CIPH_ALG_DES 0x01
+#define SPA_CTRL_CIPH_ALG_AES 0x02
+#define SPA_CTRL_CIPH_ALG_RC4 0x03
+#define SPA_CTRL_CIPH_ALG_MULTI2 0x04
+#define SPA_CTRL_CIPH_ALG_KASUMI 0x05
+
+#define SPA_CTRL_HASH_ALG_NULL (0x00 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_MD5 (0x01 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA (0x02 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA224 (0x03 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA256 (0x04 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA384 (0x05 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_SHA512 (0x06 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_AESMAC (0x07 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_AESCMAC (0x08 << SPA_CTRL_HASH_ALG_IDX)
+#define SPA_CTRL_HASH_ALG_KASF9 (0x09 << SPA_CTRL_HASH_ALG_IDX)
+
+#define SPA_CTRL_CIPH_MODE_NULL (0x00 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_ECB (0x00 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CBC (0x01 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CTR (0x02 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CCM (0x03 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_GCM (0x05 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_OFB (0x07 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_CFB (0x08 << SPA_CTRL_CIPH_MODE_IDX)
+#define SPA_CTRL_CIPH_MODE_F8 (0x09 << SPA_CTRL_CIPH_MODE_IDX)
+
+#define SPA_CTRL_HASH_MODE_RAW (0x00 << SPA_CTRL_HASH_MODE_IDX)
+#define SPA_CTRL_HASH_MODE_SSLMAC (0x01 << SPA_CTRL_HASH_MODE_IDX)
+#define SPA_CTRL_HASH_MODE_HMAC (0x02 << SPA_CTRL_HASH_MODE_IDX)
+
+#define SPA_FIFO_STAT_EMPTY (1 << 31)
+#define SPA_FIFO_CMD_FULL (1 << 7)
+
+#endif /* __PICOXCELL_CRYPTO_REGS_H__ */
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
index cead8e6..7f6f01a 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -326,6 +326,7 @@ static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, ioh_gpio_pcidev_id);
static struct pci_driver ioh_gpio_driver = {
.name = "ml_ioh_gpio",
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index 0eba0a7..2c6af87 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -286,6 +286,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ 0, }
};
+MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
static struct pci_driver pch_gpio_driver = {
.name = "pch_gpio",
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1c..f73ef43 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
- int i, rc = 0;
+ int i, j, rc = 0;
int start;
for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
transp = cmap->transp;
start = cmap->start;
- for (i = 0; i < cmap->len; i++) {
+ for (j = 0; j < cmap->len; j++) {
u16 hred, hgreen, hblue, htransp = 0xffff;
hred = *red++;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3dadfa2..28d1d3c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
* available. In that case we can't account for this and just
* hope for the best.
*/
- if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+ if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
+ }
/* Invalidate all timestamps while vblank irq's are off. */
clear_vblank_timestamps(dev, crtc);
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
/* Dot clock in Hz: */
dotclock = (u64) crtc->hwmode.clock * 1000;
+ /* Fields of interlaced scanout modes are only half a frame duration.
+ * Double the dotclock to get half the frame-/line-/pixel duration.
+ */
+ if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
+ dotclock *= 2;
+
/* Valid dotclock? */
if (dotclock > 0) {
/* Convert scanline length in pixels and video dot clock to
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
return -EAGAIN;
}
- /* Don't know yet how to handle interlaced or
- * double scan modes. Just no-op for now.
- */
- if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
- return -ENOTSUPP;
- }
-
/* Get current scanout position with system timestamp.
* Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
* if single query takes longer than max_error nanoseconds.
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
if (rc) {
tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
vblanktimestamp(dev, crtc, tslot) = t_vblank;
- smp_wmb();
}
+ smp_mb__before_atomic_inc();
atomic_add(diff, &dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
}
/**
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
- int crtc, ret = 0;
+ int ret = 0;
+ unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
* e.g., due to spurious vblank interrupts. We need to
* ignore those for accounting.
*/
- if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+ if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
/* Store new timestamp in ringbuffer. */
vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
- smp_wmb();
/* Increment cooked vblank count. This also atomically commits
* the timestamp computed above.
*/
+ smp_mb__before_atomic_inc();
atomic_inc(&dev->_vblank_count[crtc]);
+ smp_mb__after_atomic_inc();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466..4ff9b6c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
int max_freq;
/* RPSTAT1 is in the GT power well */
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
max_freq * 100);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
} else {
seq_printf(m, "no P-state info available\n");
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766..e33d9be 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
+ /* 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
+
mmio_bar = IS_GEN2(dev) ? 1 : 0;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0ad533f..22ec066 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
+unsigned int i915_semaphores = 0;
+module_param_named(semaphores, i915_semaphores, int, 0600);
+
unsigned int i915_enable_rc6 = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
@@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
}
}
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
-void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int loop = 500;
+ u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ while (fifo < 20 && loop--) {
+ udelay(10);
+ fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+ }
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 65dfe81..456f404 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
+extern unsigned int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern unsigned int i915_panel_use_ssc;
extern unsigned int i915_enable_rc6;
@@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
+uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1353,22 +1357,32 @@ __i915_write(64, q)
* must be set to prevent GT core from power down and stale values being
* returned.
*/
-void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
-void __gen6_force_wake_put (struct drm_i915_private *dev_priv);
-static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
+
+static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
if (dev_priv->info->gen >= 6) {
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
val = I915_READ(reg);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
} else
val = I915_READ(reg);
return val;
}
+static inline void i915_gt_write(struct drm_i915_private *dev_priv,
+ u32 reg, u32 val)
+{
+ if (dev_priv->info->gen >= 6)
+ __gen6_gt_wait_for_fifo(dev_priv);
+ I915_WRITE(reg, val);
+}
+
static inline void
i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4f74c..36e66cc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
* Return the required GTT alignment for an object, only taking into account
* unfenced tiled surface requirements.
*/
-static uint32_t
+uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e..50ab161 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
if (from == NULL || to == from)
return 0;
- /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev))
+ /* XXX gpu semaphores are implicated in various hard hangs on SNB */
+ if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
return i915_gem_object_wait_rendering(obj, true);
idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9..d64843e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
- obj->tiling_changed = true;
- obj->tiling_mode = args->tiling_mode;
- obj->stride = args->stride;
+ /* Rebind if we need a change of alignment */
+ if (!obj->map_and_fenceable) {
+ u32 unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(obj);
+ if (obj->gtt_offset & (unfenced_alignment - 1))
+ ret = i915_gem_object_unbind(obj);
+ }
+
+ if (ret == 0) {
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
+ }
}
+ /* we have to maintain this existing ABI... */
+ args->stride = obj->stride;
+ args->tiling_mode = obj->tiling_mode;
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dc..8a9e08b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
+ DRM_DEBUG_KMS("running encoder hotplug functions\n");
+
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
- hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
- I915_WRITE(FDI_RXA_IMR, 0);
- I915_WRITE(FDI_RXB_IMR, 0);
+ hotplug_mask |= SDE_AUX_MASK;
}
dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15d94c63..2abe240 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3271,6 +3271,8 @@
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define GT_FIFO_FREE_ENTRIES 0x120008
+
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b00653..49fb54f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
+ atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
*/
ret = i915_gem_object_flush_gpu(obj, false);
- if (ret) {
- i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ (void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
atomic_read(&obj->pending_flip) == 0);
}
+static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+
+ /*
+ * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
+ * must be driven by its own crtc; no sharing is possible.
+ */
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ if (encoder->base.crtc != crtc)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_EDP:
+ if (!intel_encoder_is_pch_edp(&encoder->base))
+ return false;
+ continue;
+ }
+ }
+
+ return true;
+}
+
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
+ bool is_pch_port = false;
if (intel_crtc->active)
return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
- ironlake_fdi_enable(crtc);
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_enable(crtc);
+ else {
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev))
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe)) &
+ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
+ }
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_flush_display_plane(dev, plane);
}
+ /* Skip the PCH stuff if possible */
+ if (!is_pch_port)
+ goto done;
+
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
-
+done:
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@@ -6203,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
- __gen6_force_wake_get(dev_priv);
+ __gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6301,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
- __gen6_force_wake_put(dev_priv);
+ __gen6_gt_force_wake_put(dev_priv);
}
void intel_enable_clock_gating(struct drm_device *dev)
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
POSTING_READ(RSTDBYCTL);
}
- ironlake_disable_rc6(dev);
+ ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992d..f8f86e5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -208,7 +208,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
- val >>= 1;
}
}
@@ -235,11 +234,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
if (is_backlight_combination_mode(dev)){
u32 max = intel_panel_get_max_backlight(dev);
- u8 lpbc;
+ u8 lbpc;
- lpbc = level * 0xfe / max + 1;
- level /= lpbc;
- pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
+ lbpc = level * 0xfe / max + 1;
+ level /= lbpc;
+ pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
}
tmp = I915_READ(BLC_PWM_CTL);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde8..3430686 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
struct drm_i915_gem_object *obj;
};
-#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
+#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
-#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 49e5e99..6bdab89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->tvconf.has_component_output = false;
break;
case OUTPUT_LVDS:
- if ((conn & 0x00003f00) != 0x10)
+ if ((conn & 0x00003f00) >> 8 != 0x10)
entry->lvdsconf.use_straps_for_mode = true;
entry->lvdsconf.use_power_scripts = true;
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26..a521840 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
- nouveau_vm_put(&nvbo->vma);
+ if (nvbo->vma.node) {
+ nouveau_vm_unmap(&nvbo->vma);
+ nouveau_vm_put(&nvbo->vma);
+ }
kfree(nvbo);
}
@@ -128,6 +131,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
}
+ nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
nvbo->channel = chan;
@@ -166,17 +170,17 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
if (dev_priv->card_type == NV_10 &&
- nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+ nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+ nvbo->bo.mem.num_pages < vram_pages / 2) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
* speed up when alpha-blending and depth-test are enabled
* at the same time.
*/
- int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
nvbo->placement.fpfn = vram_pages / 2;
nvbo->placement.lpfn = ~0;
@@ -785,7 +789,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -811,11 +815,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e000..390d82c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
int high_w = 0, high_h = 0, high_v = 0;
list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+ mode->vrefresh = drm_mode_vrefresh(mode);
if (helper->mode_valid(connector, mode) != MODE_OK ||
(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bf..b368ed7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
- ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
+ ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
+ &chan->m2mf_ntfy);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fca..982d70b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
- int cout, uint32_t *offset);
+ int cout, uint32_t start, uint32_t end,
+ uint32_t *offset);
extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 26347b7..b0fb9bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
(nvbo->tile_flags >> 8) & 0xff, &node);
- if (ret)
- return ret;
+ if (ret) {
+ mem->mm_node = NULL;
+ return (ret == -ENOSPC) ? 0 : ret;
+ }
node->page_shift = 12;
if (nvbo->vma.node)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 8844b50c..7609756 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
return 0;
}
- return -ENOMEM;
+ return -ENOSPC;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d60..5ea1676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
- int size, uint32_t *b_offset)
+ int size, uint32_t start, uint32_t end,
+ uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj *nobj = NULL;
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
uint32_t offset;
int target, ret;
- mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+ mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
+ start, end, 0);
if (mem)
- mem = drm_mm_get_block(mem, size, 0);
+ mem = drm_mm_get_block_range(mem, size, 0, start, end);
if (!mem) {
NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
return -ENOMEM;
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
if (IS_ERR(chan))
return PTR_ERR(chan);
- ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
+ ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
+ &na->offset);
nouveau_channel_put(&chan);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index f05c0cd..4399e2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
struct nouveau_pm_level *perflvl;
- if (pm->cur == &pm->boot)
+ if (!pm->cur || pm->cur == &pm->boot)
return;
perflvl = pm->cur;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550..c82db37 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
bool duallink, dummy;
- nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
- clock, &duallink, &dummy);
+ nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+ &duallink, &dummy);
if (duallink)
regp->fp_control |= (8 << 28);
} else
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
return;
if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
- struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
-
/* when removing an output, crtc may not be set, but PANEL_OFF
* must still be run
*/
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
if (mode == DRM_MODE_DPMS_ON) {
- if (!nv_connector->native_mode) {
- NV_ERROR(dev, "Not turning on LVDS without native mode\n");
- return;
- }
call_lvds_script(dev, nv_encoder->dcb, head,
- LVDS_PANEL_ON, nv_connector->native_mode->clock);
+ LVDS_PANEL_ON, nv_encoder->mode.clock);
} else
/* pxclk of 0 is fine for PANEL_OFF, and for a
* disconnected LVDS encoder there is no native_mode
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 8870d72..18d30c2 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x41: /* guess */
+ case 0x42:
+ case 0x43:
+ case 0x45: /* guess */
+ case 0x4e:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+ break;
case 0x44:
case 0x4a:
- case 0x4e:
nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
break;
-
case 0x46:
case 0x47:
case 0x49:
case 0x4b:
+ case 0x4c:
+ case 0x67:
+ default:
nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
break;
-
- default:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
- break;
}
}
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev)
break;
default:
switch (dev_priv->chipset) {
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
- break;
- default:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x4e:
+ case 0x44:
+ case 0x4a:
nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
break;
+ default:
+ nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
}
nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index ea00418..e57caa2 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
void
nv50_instmem_flush(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
nv_wr32(dev, 0x00330c, 0x00000001);
if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
+ spin_unlock(&dev_priv->ramin_lock);
}
void
nv84_instmem_flush(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
nv_wr32(dev, 0x070000, 0x00000001);
if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
NV_ERROR(dev, "PRAMIN flush timeout\n");
+ spin_unlock(&dev_priv->ramin_lock);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 459ff08..6144156 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
nv_wr32(dev, 0x100c80, (engine << 16) | 1);
if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+ spin_unlock(&dev_priv->ramin_lock);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 095bc50..a4e5e53 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,9 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
if (ss_enabled) {
if (ss->refdiv) {
+ pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
if (ASIC_IS_AVIVO(rdev))
@@ -662,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = args.v3.sOutput.ucRefDiv;
}
if (args.v3.sOutput.ucPostDiv) {
+ pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_POST_DIV;
pll->post_div = args.v3.sOutput.ucPostDiv;
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d270b3f..6140ea1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2194,7 +2194,6 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
}
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
@@ -2934,7 +2933,7 @@ static int evergreen_startup(struct radeon_device *rdev)
/* XXX: ontario has problems blitting to gart at the moment */
if (rdev->family == CHIP_PALM) {
rdev->asic->copy = NULL;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
}
/* allocate wb buffer */
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2adfb03..2be698e 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -623,7 +623,7 @@ done:
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
- rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -631,7 +631,7 @@ void evergreen_blit_fini(struct radeon_device *rdev)
{
int r;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
if (rdev->r600_blit.shader_obj == NULL)
return;
/* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 56deae5..e372f9e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -70,23 +70,6 @@ MODULE_FIRMWARE(FIRMWARE_R520);
void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
- u32 tmp;
-
- /* make sure flip is at vb rather than hb */
- tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
- tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
- /* make sure pending bit is asserted */
- tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
- WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
-
- /* set pageflip to happen as late as possible in the vblank interval.
- * same field for crtc1/2
- */
- tmp = RREG32(RADEON_CRTC_GEN_CNTL);
- tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
- WREG32(RADEON_CRTC_GEN_CNTL, tmp);
-
/* enable the pflip int */
radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
@@ -1041,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
return r;
}
rdev->cp.ready = true;
- rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -1059,7 +1042,7 @@ void r100_cp_fini(struct radeon_device *rdev)
void r100_cp_disable(struct radeon_device *rdev)
{
/* Disable ring */
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
rdev->cp.ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -2329,7 +2312,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
/* FIXME we don't use the second aperture yet when we could use it */
if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
if (rdev->flags & RADEON_IS_IGP) {
uint32_t tom;
@@ -3490,7 +3472,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
- track->aaresolve = true;
+ track->aaresolve = false;
track->aa.robj = NULL;
}
@@ -3801,8 +3783,6 @@ static int r100_startup(struct radeon_device *rdev)
r100_mc_program(rdev);
/* Resume clock */
r100_clock_startup(rdev);
- /* Initialize GPU configuration (# pipes, ...) */
-// r100_gpu_init(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r100_enable_bm(rdev);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 768c60e..069efa8 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -910,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_X16:
+ case R300_TX_FORMAT_FL_I16:
case R300_TX_FORMAT_Y8X8:
case R300_TX_FORMAT_Z5Y6X5:
case R300_TX_FORMAT_Z6Y5X5:
@@ -922,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_Y16X16:
+ case R300_TX_FORMAT_FL_I16A16:
case R300_TX_FORMAT_Z11Y11X10:
case R300_TX_FORMAT_Z10Y11X11:
case R300_TX_FORMAT_W8Z8Y8X8:
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index de88624..9b3fad2 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1255,7 +1255,6 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP) {
@@ -1937,7 +1936,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
void r600_cp_stop(struct radeon_device *rdev)
{
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 41f7aaf..df68d91 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -558,7 +558,7 @@ done:
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
- rdev->mc.active_vram_size = rdev->mc.real_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -566,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev)
{
int r;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
if (rdev->r600_blit.shader_obj == NULL)
return;
/* If we can't reserve the bo, unref should be enough to destroy
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 56c48b6..6b342949 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -345,7 +345,6 @@ struct radeon_mc {
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
- u64 active_vram_size;
u64 gtt_size;
u64 gtt_start;
u64 gtt_end;
@@ -1448,6 +1447,7 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
+extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
extern bool r600_card_posted(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e75d63b..793c5e6 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,6 +834,9 @@ static struct radeon_asic sumo_asic = {
.pm_finish = &evergreen_pm_finish,
.pm_init_profile = &rs780_pm_init_profile,
.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
};
static struct radeon_asic btc_asic = {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0e65709..3e7e7f9e 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
max_fractional_feed_div = pll->max_frac_feedback_div;
}
- for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+ for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
uint32_t ref_div;
if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 66324b5..cc44bdf 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
u32 tiling_flags = 0;
int ret;
int aligned_size, size;
+ int height = mode_cmd->height;
/* need to align pitch with crtc limits */
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
- size = mode_cmd->pitch * mode_cmd->height;
+ if (rdev->family >= CHIP_R600)
+ height = ALIGN(mode_cmd->height, 8);
+ size = mode_cmd->pitch * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index df95eb8..1fe95df 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -156,9 +156,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
+ struct ttm_mem_type_manager *man;
+
+ man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size;
- args->vram_visible = rdev->mc.real_vram_size;
+ args->vram_visible = (u64)man->size << PAGE_SHIFT;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cf0638c..78968b7 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -443,7 +443,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
(target_fb->bits_per_pixel * 8));
crtc_pitch |= crtc_pitch << 16;
-
+ crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev))
crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
@@ -502,6 +502,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
gen_cntl_val = RREG32(gen_cntl_reg);
gen_cntl_val &= ~(0xf << 8);
gen_cntl_val |= (format << 8);
+ gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
WREG32(gen_cntl_reg, gen_cntl_val);
crtc_offset = (u32)base;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e5b2cf1..8389b4c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -589,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev)
DRM_INFO("radeon: ttm finalized\n");
}
+/* this should only be called at bootup or when userspace
+ * isn't running */
+void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
+{
+ struct ttm_mem_type_manager *man;
+
+ if (!rdev->mman.initialized)
+ return;
+
+ man = &rdev->mman.bdev.man[TTM_PL_VRAM];
+ /* this just adjusts TTM size idea, which sets lpfn to the correct value */
+ man->size = size >> PAGE_SHIFT;
+}
+
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5afe294..8af4679 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -751,7 +751,6 @@ void rs600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 6638c8e..66c949b 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev)
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index d8ba676..714ad45 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
*/
void r700_cp_stop(struct radeon_device *rdev)
{
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
}
@@ -1123,7 +1123,6 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
- rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
r700_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 773e484..297bc9a 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
will be called k8temp.
config SENSORS_K10TEMP
- tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+ tristate "AMD Family 10h/11h/12h/14h temperature sensor"
depends on X86 && PCI
help
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
- the AMD Family 10h and all revisions of the AMD Family 11h
- microarchitectures.
+ the AMD Family 10h and all revisions of the AMD Family 11h,
+ 12h (Llano), and 14h (Brazos) microarchitectures.
This driver can also be built as a module. If so, the module
will be called k10temp.
@@ -455,13 +455,14 @@ config SENSORS_JZ4740
called jz4740-hwmon.
config SENSORS_JC42
- tristate "JEDEC JC42.4 compliant temperature sensors"
+ tristate "JEDEC JC42.4 compliant memory module temperature sensors"
depends on I2C
help
- If you say yes here you get support for Jedec JC42.4 compliant
- temperature sensors. Support will include, but not be limited to,
- ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
- MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
+ If you say yes here, you get support for JEDEC JC42.4 compliant
+ temperature sensors, which are used on many DDR3 memory modules for
+ mobile devices and servers. Support will include, but not be limited
+ to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+ MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
This driver can also be built as a module. If so, the module
will be called jc42.
@@ -574,7 +575,7 @@ config SENSORS_LM85
help
If you say yes here you get support for National Semiconductor LM85
sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
- EMC6D101 and EMC6D102.
+ EMC6D101, EMC6D102, and EMC6D103.
This driver can also be built as a module. If so, the module
will be called lm85.
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822a..d46c0c7 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
{ "ad7414", 0 },
{}
};
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
static struct i2c_driver ad7414_driver = {
.driver = {
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843..5cc3e37 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
{ "adt7411", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
static struct i2c_driver adt7411_driver = {
.driver = {
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 3f49dd3..6e06019 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -37,7 +37,7 @@
#define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */
#define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */
#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
-#define SIO_LOCK_KEY 0xAA /* Key to diasble Super-I/O */
+#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
#define SIO_REG_LDSEL 0x07 /* Logical device select */
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
@@ -2111,7 +2111,6 @@ static int f71882fg_remove(struct platform_device *pdev)
int nr_fans = (data->type == f71882fg) ? 4 : 3;
u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
- platform_set_drvdata(pdev, NULL);
if (data->hwmon_dev)
hwmon_device_unregister(data->hwmon_dev);
@@ -2178,6 +2177,7 @@ static int f71882fg_remove(struct platform_device *pdev)
}
}
+ platform_set_drvdata(pdev, NULL);
kfree(data);
return 0;
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 340fc78..9349912 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = {
/* Configuration register defines */
#define JC42_CFG_CRIT_ONLY (1 << 2)
+#define JC42_CFG_TCRIT_LOCK (1 << 6)
+#define JC42_CFG_EVENT_LOCK (1 << 7)
#define JC42_CFG_SHUTDOWN (1 << 8)
#define JC42_CFG_HYST_SHIFT 9
#define JC42_CFG_HYST_MASK 0x03
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
struct jc42_data *data = i2c_get_clientdata(client);
- long val;
+ unsigned long val;
int diff, hyst;
int err;
int ret = count;
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev,
static DEVICE_ATTR(temp1_input, S_IRUGO,
show_temp_input, NULL);
-static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit, S_IRUGO,
show_temp_crit, set_temp_crit);
-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_min, S_IRUGO,
show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_max, S_IRUGO,
show_temp_max, set_temp_max);
-static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
show_temp_crit_hyst, set_temp_crit_hyst);
static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
show_temp_max_hyst, NULL);
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = {
NULL
};
+static mode_t jc42_attribute_mode(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct jc42_data *data = i2c_get_clientdata(client);
+ unsigned int config = data->config;
+ bool readonly;
+
+ if (attr == &dev_attr_temp1_crit.attr)
+ readonly = config & JC42_CFG_TCRIT_LOCK;
+ else if (attr == &dev_attr_temp1_min.attr ||
+ attr == &dev_attr_temp1_max.attr)
+ readonly = config & JC42_CFG_EVENT_LOCK;
+ else if (attr == &dev_attr_temp1_crit_hyst.attr)
+ readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
+ else
+ readonly = true;
+
+ return S_IRUGO | (readonly ? 0 : S_IWUSR);
+}
+
static const struct attribute_group jc42_group = {
.attrs = jc42_attributes,
+ .is_visible = jc42_attribute_mode,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index da5a240..82bf65a 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
/*
- * k10temp.c - AMD Family 10h/11h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
*
* Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
*
@@ -25,7 +25,7 @@
#include <linux/pci.h>
#include <asm/processor.h>
-MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
+MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 1e22984..d2cc286 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
any_chip, lm85b, lm85c,
adm1027, adt7463, adt7468,
- emc6d100, emc6d102
+ emc6d100, emc6d102, emc6d103
};
/* The LM85 registers */
@@ -90,6 +90,9 @@ enum chips {
#define LM85_VERSTEP_EMC6D100_A0 0x60
#define LM85_VERSTEP_EMC6D100_A1 0x61
#define LM85_VERSTEP_EMC6D102 0x65
+#define LM85_VERSTEP_EMC6D103_A0 0x68
+#define LM85_VERSTEP_EMC6D103_A1 0x69
+#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */
#define LM85_REG_CONFIG 0x40
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = {
{ "emc6d100", emc6d100 },
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
+ { "emc6d103", emc6d103 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
case LM85_VERSTEP_EMC6D102:
type_name = "emc6d102";
break;
+ case LM85_VERSTEP_EMC6D103_A0:
+ case LM85_VERSTEP_EMC6D103_A1:
+ type_name = "emc6d103";
+ break;
+ /*
+ * Registers apparently missing in EMC6D103S/EMC6D103:A2
+ * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
+ * (according to the data sheets), but used unconditionally
+ * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
+ * So skip EMC6D103S for now.
+ case LM85_VERSTEP_EMC6D103S:
+ type_name = "emc6d103s";
+ break;
+ */
}
} else {
dev_dbg(&adapter->dev,
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client,
case adt7468:
case emc6d100:
case emc6d102:
+ case emc6d103:
data->freq_map = adm1027_freq_map;
break;
default:
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
/* More alarm bits */
data->alarms |= lm85_read_value(client,
EMC6D100_REG_ALARM3) << 16;
- } else if (data->type == emc6d102) {
+ } else if (data->type == emc6d102 || data->type == emc6d103) {
/* Have to read LSB bits after the MSB ones because
the reading of the MSB bits has frozen the
LSBs (backward from the ADM1027).
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 2e067dd..50ea1f4 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/ktime.h>
+#include <linux/slab.h>
#define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */
#define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index ef3bcb1..1b46a9d 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -249,7 +249,7 @@ static struct i2c_adapter ocores_adapter = {
static int ocores_i2c_of_probe(struct platform_device* pdev,
struct ocores_i2c* i2c)
{
- __be32* val;
+ const __be32* val;
val = of_get_property(pdev->dev.of_node, "regstep", NULL);
if (!val) {
@@ -330,9 +330,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
i2c->adap = ocores_adapter;
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &pdev->dev;
-#ifdef CONFIG_OF
i2c->adap.dev.of_node = pdev->dev.of_node;
-#endif
/* add i2c adapter to i2c tree */
ret = i2c_add_adapter(&i2c->adap);
@@ -390,15 +388,11 @@ static int ocores_i2c_resume(struct platform_device *pdev)
#define ocores_i2c_resume NULL
#endif
-#ifdef CONFIG_OF
static struct of_device_id ocores_i2c_match[] = {
- {
- .compatible = "opencores,i2c-ocores",
- },
- {},
+ { .compatible = "opencores,i2c-ocores", },
+ {},
};
MODULE_DEVICE_TABLE(of, ocores_i2c_match);
-#endif
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:ocores-i2c");
@@ -411,9 +405,7 @@ static struct platform_driver ocores_i2c_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "ocores-i2c",
-#ifdef CONFIG_OF
- .of_match_table = ocores_i2c_match,
-#endif
+ .of_match_table = ocores_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b605ff3..58a58c7 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -378,9 +378,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* REVISIT: Some wkup sources might not be needed.
*/
dev->westate = OMAP_I2C_WE_ALL;
- if (dev->rev < OMAP_I2C_REV_ON_4430)
- omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
- dev->westate);
+ omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
}
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
@@ -847,11 +845,15 @@ complete:
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
}
+ /*
+ * ProDB0017052: Clear ARDY bit twice
+ */
if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
OMAP_I2C_STAT_AL)) {
omap_i2c_ack_stat(dev, stat &
(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
- OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+ OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+ OMAP_I2C_STAT_ARDY));
omap_i2c_complete_cmd(dev, err);
return IRQ_HANDLED;
}
@@ -1137,12 +1139,41 @@ omap_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_SUSPEND
+static int omap_i2c_suspend(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+ dev->bus->pm->runtime_suspend(dev);
+
+ return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+ if (!pm_runtime_suspended(dev))
+ if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+ dev->bus->pm->runtime_resume(dev);
+
+ return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+ .suspend = omap_i2c_suspend,
+ .resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
+ .pm = OMAP_I2C_PM_OPS,
},
};
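The new omap_i2c_suspend()/omap_i2c_resume() callbacks only forward to the bus's runtime PM hooks when the device is not already runtime-suspended. A rough model of that dispatch, with hypothetical structure and function names standing in for the driver-model types:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for struct device / dev_pm_ops, used only to
 * illustrate the control flow of the new suspend callback. */
struct fake_bus_pm_ops {
	int (*runtime_suspend)(void *dev);
	int (*runtime_resume)(void *dev);
};

struct fake_device {
	bool runtime_suspended;
	const struct fake_bus_pm_ops *bus_pm;
};

/* Mirrors the shape of omap_i2c_suspend(): skip devices that are already
 * runtime-suspended, otherwise defer to the bus's runtime_suspend hook. */
static int system_suspend(struct fake_device *dev)
{
	if (!dev->runtime_suspended &&
	    dev->bus_pm && dev->bus_pm->runtime_suspend)
		return dev->bus_pm->runtime_suspend(dev);
	return 0;
}

static int bus_runtime_suspend(void *dev)
{
	(void)dev;
	printf("bus runtime_suspend invoked\n");
	return 0;
}

int main(void)
{
	const struct fake_bus_pm_ops ops = { bus_runtime_suspend, NULL };
	struct fake_device active = { .runtime_suspended = false, .bus_pm = &ops };
	struct fake_device asleep = { .runtime_suspended = true,  .bus_pm = &ops };

	system_suspend(&active);	/* forwards to the bus hook */
	system_suspend(&asleep);	/* already suspended, nothing to do */
	return 0;
}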
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 495be45..266135d 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
/* DDC class but actually often used for more generic I2C */
adap->class = I2C_CLASS_DDC;
- strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+ strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
sizeof(adap->name));
adap->nr = bus_nr;
adap->algo = &stu300_algo;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index f0bd5bc..045ba6e 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -537,9 +537,7 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
-#ifdef CONFIG_OF
client->dev.of_node = info->of_node;
-#endif
dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
client->addr);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1fa091e..4a5c4a4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -62,6 +62,7 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <asm/mwait.h>
+#include <asm/msr.h>
#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "
@@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
static struct cpuidle_state *cpuidle_state_table;
/*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
+/*
* Set this flag for states where the HW flushes the TLB for us
* and so we don't need cross-calls to keep it consistent.
* If this flag is set, SW flushes the TLB, so even if the
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
.notifier_call = setup_broadcast_cpuhp_notify,
};
+static void auto_demotion_disable(void *dummy)
+{
+ unsigned long long msr_bits;
+
+ rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+ msr_bits &= ~auto_demotion_disable_flags;
+ wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
/*
* intel_idle_probe()
*/
@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
case 0x25: /* Westmere */
case 0x2C: /* Westmere */
cpuidle_state_table = nehalem_cstates;
+ auto_demotion_disable_flags =
+ (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
break;
case 0x1C: /* 28 - Atom Processor */
+ cpuidle_state_table = atom_cstates;
+ break;
+
case 0x26: /* 38 - Lincroft Atom Processor */
cpuidle_state_table = atom_cstates;
+ auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
break;
case 0x2A: /* SNB */
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
return -EIO;
}
}
+ if (auto_demotion_disable_flags)
+ smp_call_function(auto_demotion_disable, NULL, 1);
return 0;
}
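auto_demotion_disable() above is a read-modify-write of a model-specific register: read MSR_NHM_SNB_PKG_CST_CFG_CTL, clear the selected auto-demotion enable bits, write the value back, and run that across CPUs via smp_call_function(). A hedged userspace sketch of the same clear-bits pattern on a plain 64-bit register image; the bit positions here are illustrative, not taken from the arch headers:

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit positions only; the real NHM_C1/C3_AUTO_DEMOTE
 * definitions live in the x86 MSR headers. */
#define DEMO_C1_AUTO_DEMOTE	(1ULL << 26)
#define DEMO_C3_AUTO_DEMOTE	(1ULL << 25)

/* Flags selected at probe time, like auto_demotion_disable_flags. */
static const uint64_t auto_demotion_disable_flags =
	DEMO_C1_AUTO_DEMOTE | DEMO_C3_AUTO_DEMOTE;

/* Read-modify-write step (rdmsrl/wrmsrl stand-in): clear only the
 * selected enable bits, leave the rest of the register untouched. */
static uint64_t disable_auto_demotion(uint64_t msr_bits)
{
	return msr_bits & ~auto_demotion_disable_flags;
}

int main(void)
{
	uint64_t before = 0x0000000016008400ULL;	/* example register image */
	uint64_t after  = disable_auto_demotion(before);

	printf("before: 0x%016llx\n", (unsigned long long)before);
	printf("after : 0x%016llx\n", (unsigned long long)after);
	return 0;
}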
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 64e0903..f804e28 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1988,6 +1988,10 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
goto out;
}
+ if (cm_id->lap_state == IB_CM_LAP_SENT ||
+ cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
+ ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret) {
cm_enter_timewait(cm_id_priv);
@@ -2129,6 +2133,10 @@ static int cm_dreq_handler(struct cm_work *work)
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
break;
case IB_CM_ESTABLISHED:
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
+ cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+ ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+ break;
case IB_CM_MRA_REP_RCVD:
break;
case IB_CM_TIMEWAIT:
@@ -2349,9 +2357,18 @@ static int cm_rej_handler(struct cm_work *work)
/* fall through */
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
- case IB_CM_ESTABLISHED:
cm_enter_timewait(cm_id_priv);
break;
+ case IB_CM_ESTABLISHED:
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
+ cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
+ if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
+ ib_cancel_mad(cm_id_priv->av.port->mad_agent,
+ cm_id_priv->msg);
+ cm_enter_timewait(cm_id_priv);
+ break;
+ }
+ /* fall through */
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
@@ -2989,6 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
goto out; /* No match. */
}
atomic_inc(&cur_cm_id_priv->refcount);
+ atomic_inc(&cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6884da2..5ed9d25 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -308,11 +308,13 @@ static inline void release_mc(struct kref *kref)
kfree(mc);
}
-static void cma_detach_from_dev(struct rdma_id_private *id_priv)
+static void cma_release_dev(struct rdma_id_private *id_priv)
{
+ mutex_lock(&lock);
list_del(&id_priv->list);
cma_deref_dev(id_priv->cma_dev);
id_priv->cma_dev = NULL;
+ mutex_unlock(&lock);
}
static int cma_set_qkey(struct rdma_id_private *id_priv)
@@ -373,6 +375,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+ mutex_lock(&lock);
iboe_addr_get_sgid(dev_addr, &iboe_gid);
memcpy(&gid, dev_addr->src_dev_addr +
rdma_addr_gid_offset(dev_addr), sizeof gid);
@@ -398,6 +401,7 @@ out:
if (!ret)
cma_attach_to_dev(id_priv, cma_dev);
+ mutex_unlock(&lock);
return ret;
}
@@ -904,9 +908,14 @@ void rdma_destroy_id(struct rdma_cm_id *id)
state = cma_exch(id_priv, CMA_DESTROYING);
cma_cancel_operation(id_priv, state);
- mutex_lock(&lock);
+ /*
+ * Wait for any active callback to finish. New callbacks will find
+ * the id_priv state set to destroying and abort.
+ */
+ mutex_lock(&id_priv->handler_mutex);
+ mutex_unlock(&id_priv->handler_mutex);
+
if (id_priv->cma_dev) {
- mutex_unlock(&lock);
switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB:
if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
@@ -920,10 +929,8 @@ void rdma_destroy_id(struct rdma_cm_id *id)
break;
}
cma_leave_mc_groups(id_priv);
- mutex_lock(&lock);
- cma_detach_from_dev(id_priv);
+ cma_release_dev(id_priv);
}
- mutex_unlock(&lock);
cma_release_port(id_priv);
cma_deref_id(id_priv);
@@ -1200,9 +1207,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
- mutex_lock(&lock);
ret = cma_acquire_dev(conn_id);
- mutex_unlock(&lock);
if (ret)
goto release_conn_id;
@@ -1210,6 +1215,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
cm_id->context = conn_id;
cm_id->cm_handler = cma_ib_handler;
+ /*
+ * Protect against the user destroying conn_id from another thread
+ * until we're done accessing it.
+ */
+ atomic_inc(&conn_id->refcount);
ret = conn_id->id.event_handler(&conn_id->id, &event);
if (!ret) {
/*
@@ -1222,8 +1232,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
mutex_unlock(&lock);
mutex_unlock(&conn_id->handler_mutex);
+ cma_deref_id(conn_id);
goto out;
}
+ cma_deref_id(conn_id);
/* Destroy the CM ID by returning a non-zero value. */
conn_id->cm_id.ib = NULL;
@@ -1394,9 +1406,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
goto out;
}
- mutex_lock(&lock);
ret = cma_acquire_dev(conn_id);
- mutex_unlock(&lock);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -1425,17 +1435,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
event.param.conn.private_data_len = iw_event->private_data_len;
event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
event.param.conn.responder_resources = attr.max_qp_rd_atom;
+
+ /*
+ * Protect against the user destroying conn_id from another thread
+ * until we're done accessing it.
+ */
+ atomic_inc(&conn_id->refcount);
ret = conn_id->id.event_handler(&conn_id->id, &event);
if (ret) {
/* User wants to destroy the CM ID */
conn_id->cm_id.iw = NULL;
cma_exch(conn_id, CMA_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
+ cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id);
goto out;
}
mutex_unlock(&conn_id->handler_mutex);
+ cma_deref_id(conn_id);
out:
if (dev)
@@ -1951,20 +1969,11 @@ static void addr_handler(int status, struct sockaddr *src_addr,
memset(&event, 0, sizeof event);
mutex_lock(&id_priv->handler_mutex);
-
- /*
- * Grab mutex to block rdma_destroy_id() from removing the device while
- * we're trying to acquire it.
- */
- mutex_lock(&lock);
- if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
- mutex_unlock(&lock);
+ if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
goto out;
- }
if (!status && !id_priv->cma_dev)
status = cma_acquire_dev(id_priv);
- mutex_unlock(&lock);
if (status) {
if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
@@ -2265,9 +2274,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err1;
- mutex_lock(&lock);
ret = cma_acquire_dev(id_priv);
- mutex_unlock(&lock);
if (ret)
goto err1;
}
@@ -2279,11 +2286,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
return 0;
err2:
- if (id_priv->cma_dev) {
- mutex_lock(&lock);
- cma_detach_from_dev(id_priv);
- mutex_unlock(&lock);
- }
+ if (id_priv->cma_dev)
+ cma_release_dev(id_priv);
err1:
cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
return ret;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 8b00e6c..b4d9e4c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,9 +61,9 @@ static char *states[] = {
NULL,
};
-static int dack_mode;
+static int dack_mode = 1;
module_param(dack_mode, int, 0644);
-MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
@@ -482,6 +482,7 @@ static int send_connect(struct c4iw_ep *ep)
TX_CHAN(ep->tx_chan) |
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos) |
+ ULP_MODE(ULP_MODE_TCPDDP) |
RCV_BUFSIZ(rcv_win>>10);
opt2 = RX_CHANNEL(0) |
RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
@@ -1274,6 +1275,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
TX_CHAN(ep->tx_chan) |
SMAC_SEL(ep->smac_idx) |
DSCP(ep->tos) |
+ ULP_MODE(ULP_MODE_TCPDDP) |
RCV_BUFSIZ(rcv_win>>10);
opt2 = RX_CHANNEL(0) |
RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 54fbc11..e29172c 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -87,17 +87,22 @@ static int dump_qp(int id, void *p, void *data)
return 1;
if (qp->ep)
- cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
+ cc = snprintf(qpd->buf + qpd->pos, space,
+ "qp sq id %u rq id %u state %u onchip %u "
"ep tid %u state %u %pI4:%u->%pI4:%u\n",
- qp->wq.sq.qid, (int)qp->attr.state,
+ qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
+ qp->wq.sq.flags & T4_SQ_ONCHIP,
qp->ep->hwtid, (int)qp->ep->com.state,
&qp->ep->com.local_addr.sin_addr.s_addr,
ntohs(qp->ep->com.local_addr.sin_port),
&qp->ep->com.remote_addr.sin_addr.s_addr,
ntohs(qp->ep->com.remote_addr.sin_port));
else
- cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
- qp->wq.sq.qid, (int)qp->attr.state);
+ cc = snprintf(qpd->buf + qpd->pos, space,
+ "qp sq id %u rq id %u state %u onchip %u\n",
+ qp->wq.sq.qid, qp->wq.rq.qid,
+ (int)qp->attr.state,
+ qp->wq.sq.flags & T4_SQ_ONCHIP);
if (cc < space)
qpd->pos += cc;
return 0;
@@ -368,7 +373,6 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
static void c4iw_remove(struct c4iw_dev *dev)
{
PDBG("%s c4iw_dev %p\n", __func__, dev);
- cancel_delayed_work_sync(&dev->db_drop_task);
list_del(&dev->entry);
if (dev->registered)
c4iw_unregister_device(dev);
@@ -523,8 +527,16 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
case CXGB4_STATE_START_RECOVERY:
printk(KERN_INFO MOD "%s: Fatal Error\n",
pci_name(dev->rdev.lldi.pdev));
- if (dev->registered)
+ dev->rdev.flags |= T4_FATAL_ERROR;
+ if (dev->registered) {
+ struct ib_event event;
+
+ memset(&event, 0, sizeof event);
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &dev->ibdev;
+ ib_dispatch_event(&event);
c4iw_unregister_device(dev);
+ }
break;
case CXGB4_STATE_DETACH:
printk(KERN_INFO MOD "%s: Detach\n",
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 2fe19ec..9f6166f 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -176,7 +176,6 @@ struct c4iw_dev {
struct idr mmidr;
spinlock_t lock;
struct list_head entry;
- struct delayed_work db_drop_task;
struct dentry *debugfs_root;
u8 registered;
};
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 4f0be25..70a5a3c 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -31,9 +31,9 @@
*/
#include "iw_cxgb4.h"
-static int ocqp_support;
+static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
-MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=0)");
+MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 7000442..24af12f 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -507,8 +507,14 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
- if (++cq->cidx_inc == cq->size)
+ if (++cq->cidx_inc == (cq->size >> 4)) {
+ u32 val;
+
+ val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
+ INGRESSQID(cq->cqid);
+ writel(val, cq->gts);
cq->cidx_inc = 0;
+ }
if (++cq->cidx == cq->size) {
cq->cidx = 0;
cq->gen ^= 1;
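The t4_hwcq_consume() change now rings the GTS doorbell (a CIDXINC update) once every cq->size >> 4 consumed CQEs, so the hardware's view of the consumer index is refreshed in small batches rather than letting cidx_inc grow toward the full queue size. A generic sketch of that batched-index-update pattern, with a hypothetical ring structure and a printf standing in for the doorbell write:

#include <stdio.h>

#define RING_SIZE 64	/* must be a power of two for this sketch */

struct ring {
	unsigned int cidx;	/* software consumer index */
	unsigned int cidx_inc;	/* entries consumed since last doorbell */
};

/* Stand-in for writel(val, cq->gts): tell the device how many entries
 * were consumed so it can reclaim them. */
static void ring_doorbell(unsigned int cidx_inc)
{
	printf("doorbell: CIDXINC=%u\n", cidx_inc);
}

/* Consume one entry; batch doorbell writes to one per RING_SIZE/16
 * entries, mirroring the cq->size >> 4 threshold in t4_hwcq_consume(). */
static void consume(struct ring *r)
{
	if (++r->cidx_inc == (RING_SIZE >> 4)) {
		ring_doorbell(r->cidx_inc);
		r->cidx_inc = 0;
	}
	if (++r->cidx == RING_SIZE)
		r->cidx = 0;
}

int main(void)
{
	struct ring r = { 0, 0 };

	for (int i = 0; i < 10; i++)	/* 10 entries -> doorbells at 4 and 8 */
		consume(&r);
	return 0;
}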
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index b8cb2f1..8991677 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -557,6 +557,7 @@ static ssize_t store_reset(struct device *dev,
dev_info(dev,"Unit %d is disabled, can't reset\n",
dd->ipath_unit);
ret = -EINVAL;
+ goto bail;
}
ret = ipath_reset_device(dd->ipath_unit);
bail:
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8b606fd..08c1948 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
netif_carrier_on(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesdev->iw_status == 0) {
- nesdev->iw_status = 1;
- nes_port_ibevent(nesvnic);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 0) {
+ nesdev->iw_status = 1;
+ nes_port_ibevent(nesvnic);
+ }
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
netif_carrier_off(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesdev->iw_status == 1) {
- nesdev->iw_status = 0;
- nes_port_ibevent(nesvnic);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 1) {
+ nesdev->iw_status = 0;
+ nes_port_ibevent(nesvnic);
+ }
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work)
netif_carrier_on(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesdev->iw_status == 0) {
- nesdev->iw_status = 1;
- nes_port_ibevent(nesvnic);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 0) {
+ nesdev->iw_status = 1;
+ nes_port_ibevent(nesvnic);
+ }
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work)
netif_carrier_off(nesvnic->netdev);
spin_lock(&nesvnic->port_ibevent_lock);
- if (nesdev->iw_status == 1) {
- nesdev->iw_status = 0;
- nes_port_ibevent(nesvnic);
+ if (nesvnic->of_device_registered) {
+ if (nesdev->iw_status == 1) {
+ nesdev->iw_status = 0;
+ nes_port_ibevent(nesvnic);
+ }
}
spin_unlock(&nesvnic->port_ibevent_lock);
}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index b01809a..4a2d21e 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5582,9 +5582,16 @@ static void qsfp_7322_event(struct work_struct *work)
* even on failure to read cable information. We don't
* get here for QME, so IS_QME check not needed here.
*/
- le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
- !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
- LE2_5m : LE2_DEFAULT;
+ if (!ret && !ppd->dd->cspec->r1) {
+ if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
+ le2 = LE2_QME;
+ else if (qd->cache.atten[1] >= qib_long_atten &&
+ QSFP_IS_CU(qd->cache.tech))
+ le2 = LE2_5m;
+ else
+ le2 = LE2_DEFAULT;
+ } else
+ le2 = LE2_DEFAULT;
ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
init_txdds_table(ppd, 0);
}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 5ad224e..8fd3df5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -464,8 +464,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
memset(smp->data, 0, sizeof(smp->data));
/* Only return the mkey if the protection field allows it. */
- if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
- ibp->mkeyprot == 0)
+ if (!(smp->method == IB_MGMT_METHOD_GET &&
+ ibp->mkey != smp->mkey &&
+ ibp->mkeyprot == 1))
pip->mkey = ibp->mkey;
pip->gid_prefix = ibp->gid_prefix;
lid = ppd->lid;
@@ -705,7 +706,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
lwe = pip->link_width_enabled;
if (lwe) {
if (lwe == 0xFF)
- lwe = ppd->link_width_supported;
+ set_link_width_enabled(ppd, ppd->link_width_supported);
else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
smp->status |= IB_SMP_INVALID_FIELD;
else if (lwe != ppd->link_width_enabled)
@@ -720,7 +721,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
* speeds.
*/
if (lse == 15)
- lse = ppd->link_speed_supported;
+ set_link_speed_enabled(ppd,
+ ppd->link_speed_supported);
else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
smp->status |= IB_SMP_INVALID_FIELD;
else if (lse != ppd->link_speed_enabled)
@@ -849,7 +851,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
if (clientrereg)
pip->clientrereg_resv_subnetto |= 0x80;
- goto done;
+ goto get_only;
err:
smp->status |= IB_SMP_INVALID_FIELD;
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
index 19b527b..c109bbd 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -79,6 +79,8 @@
extern const char *const qib_qsfp_devtech[16];
/* Active Equalization includes fiber, copper full EQ, and copper near Eq */
#define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Active Equalization includes fiber, copper full EQ, and copper far Eq */
+#define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)
/* Attenuation should be valid for copper other than full/near Eq */
#define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
/* Length is only valid if technology is "copper" */
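QSFP_IS_ACTIVE_FAR() uses the same trick as the neighbouring macros: the transmitter technology code sits in the high nibble of the byte, and the 16-bit constant acts as a one-bit-per-code lookup table, so (0x32FF >> (tech >> 4)) & 1 answers the question with a single shift. A small standalone check of which of the 16 technology codes the new mask classifies as "active far":

#include <stdio.h>

/* Same encoding as the qib_qsfp.h macros: the technology code is the
 * high nibble of the byte, and the constant is a 16-entry bit table. */
#define QSFP_IS_ACTIVE_FAR(tech)  ((0x32FF >> ((tech) >> 4)) & 1)

int main(void)
{
	/* Walk all 16 possible technology codes (high nibble 0x0..0xF). */
	for (unsigned int code = 0; code < 16; code++) {
		unsigned char tech = (unsigned char)(code << 4);

		printf("tech code 0x%X: active-far=%d\n",
		       code, QSFP_IS_ACTIVE_FAR(tech));
	}
	return 0;
}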
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8245237..eca0c41 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
* there are still requests that haven't been acked.
*/
if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
- !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+ !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+ (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
start_timer(qp);
while (qp->s_last != qp->s_acked) {
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
}
spin_lock_irqsave(&qp->s_lock, flags);
+ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+ goto ack_done;
/* Ignore invalid responses. */
if (qib_cmp24(psn, qp->s_next_psn) >= 0)
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 23cf8fc..5b8f59d 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &gameport_event_list);
- schedule_work(&gameport_event_work);
+ queue_work(system_long_wq, &gameport_event_work);
out:
spin_unlock_irqrestore(&gameport_event_lock, flags);
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index ac471b7..99ce903 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -71,8 +71,9 @@ struct tegra_kbc {
spinlock_t lock;
unsigned int repoll_dly;
unsigned long cp_dly_jiffies;
+ bool use_fn_map;
const struct tegra_kbc_platform_data *pdata;
- unsigned short keycode[KBC_MAX_KEY];
+ unsigned short keycode[KBC_MAX_KEY * 2];
unsigned short current_keys[KBC_MAX_KPENT];
unsigned int num_pressed_keys;
struct timer_list timer;
@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = {
KEY(15, 5, KEY_F2),
KEY(15, 6, KEY_CAPSLOCK),
KEY(15, 7, KEY_F6),
+
+ /* Software Handled Function Keys */
+ KEY(20, 0, KEY_KP7),
+
+ KEY(21, 0, KEY_KP9),
+ KEY(21, 1, KEY_KP8),
+ KEY(21, 2, KEY_KP4),
+ KEY(21, 4, KEY_KP1),
+
+ KEY(22, 1, KEY_KPSLASH),
+ KEY(22, 2, KEY_KP6),
+ KEY(22, 3, KEY_KP5),
+ KEY(22, 4, KEY_KP3),
+ KEY(22, 5, KEY_KP2),
+ KEY(22, 7, KEY_KP0),
+
+ KEY(27, 1, KEY_KPASTERISK),
+ KEY(27, 3, KEY_KPMINUS),
+ KEY(27, 4, KEY_KPPLUS),
+ KEY(27, 5, KEY_KPDOT),
+
+ KEY(28, 5, KEY_VOLUMEUP),
+
+ KEY(29, 3, KEY_HOME),
+ KEY(29, 4, KEY_END),
+ KEY(29, 5, KEY_BRIGHTNESSDOWN),
+ KEY(29, 6, KEY_VOLUMEDOWN),
+ KEY(29, 7, KEY_BRIGHTNESSUP),
+
+ KEY(30, 0, KEY_NUMLOCK),
+ KEY(30, 1, KEY_SCROLLLOCK),
+ KEY(30, 2, KEY_MUTE),
+
+ KEY(31, 4, KEY_HELP),
};
static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
unsigned int i;
unsigned int num_down = 0;
unsigned long flags;
+ bool fn_keypress = false;
spin_lock_irqsave(&kbc->lock, flags);
for (i = 0; i < KBC_MAX_KPENT; i++) {
@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
scancodes[num_down] = scancode;
- keycodes[num_down++] = kbc->keycode[scancode];
+ keycodes[num_down] = kbc->keycode[scancode];
+ /* If driver uses Fn map, do not report the Fn key. */
+ if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+ fn_keypress = true;
+ else
+ num_down++;
}
val >>= 8;
}
+
+ /*
+ * If the platform uses Fn keymaps, translate keys on a Fn keypress.
+ * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
+ */
+ if (fn_keypress) {
+ for (i = 0; i < num_down; i++) {
+ scancodes[i] += KBC_MAX_KEY;
+ keycodes[i] = kbc->keycode[scancodes[i]];
+ }
+ }
+
spin_unlock_irqrestore(&kbc->lock, flags);
tegra_kbc_report_released_keys(kbc->idev,
@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
input_dev->keycode = kbc->keycode;
input_dev->keycodesize = sizeof(kbc->keycode[0]);
- input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
+ input_dev->keycodemax = KBC_MAX_KEY;
+ if (pdata->use_fn_map)
+ input_dev->keycodemax *= 2;
+ kbc->use_fn_map = pdata->use_fn_map;
keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
input_dev->keycode, input_dev->keybit);
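The Tegra KBC change keeps two keycode banks in one array: plain keycodes in entries 0..KBC_MAX_KEY-1 and the Fn-modified keycodes KBC_MAX_KEY entries later, so a pressed Fn key is swallowed and every other scancode in the same scan is looked up again with a +KBC_MAX_KEY offset. A toy sketch of that translation with made-up sizes and keycodes:

#include <stdio.h>
#include <stdbool.h>

#define MAX_KEY 8		/* toy keymap size; the driver uses KBC_MAX_KEY */
#define KEY_FN  0xFF		/* placeholder "Fn" keycode */

/* One array, two banks: [0..MAX_KEY-1] plain, [MAX_KEY..2*MAX_KEY-1] Fn. */
static const unsigned short keycode[MAX_KEY * 2] = {
	/* plain bank */ 10, 11, 12, KEY_FN, 14, 15, 16, 17,
	/* Fn bank    */  0,  0, 112,      0, 114,  0,  0,  0,
};

static void report_keys(const unsigned int *scancodes, unsigned int n)
{
	unsigned int down[MAX_KEY], num_down = 0;
	bool fn_pressed = false;

	/* First pass: drop the Fn key itself, remember that it was held. */
	for (unsigned int i = 0; i < n; i++) {
		if (keycode[scancodes[i]] == KEY_FN)
			fn_pressed = true;
		else
			down[num_down++] = scancodes[i];
	}

	/* Second pass: if Fn was held, re-index into the second bank. */
	for (unsigned int i = 0; i < num_down; i++) {
		unsigned int sc = down[i] + (fn_pressed ? MAX_KEY : 0);

		printf("scancode %u -> keycode %u\n", down[i], keycode[sc]);
	}
}

int main(void)
{
	unsigned int scan[] = { 2, 3, 4 };	/* key 2, Fn, key 4 pressed */

	report_keys(scan, 3);			/* reports Fn-bank codes 112 and 114 */
	return 0;
}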
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 25e5d04..7453938 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,6 +51,29 @@
#define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
#define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
#define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
+
+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte mask name meaning
+ * ---- ---- ------- ------------
+ * 1 0x01 adjustable threshold capacitive button sensitivity
+ * can be adjusted
+ * 1 0x02 report max query 0x0d gives max coord reported
+ * 1 0x04 clearpad sensor is ClearPad product
+ * 1 0x08 advanced gesture not particularly meaningful
+ * 1 0x10 clickpad bit 0 1-button ClickPad
+ * 1 0x60 multifinger mode identifies firmware finger counting
+ * (not reporting!) algorithm.
+ * Not particularly meaningful
+ * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered
+ * 2 0x01 clickpad bit 1 2-button ClickPad
+ * 2 0x02 deluxe LED controls touchpad support LED commands
+ * ala multimedia control bar
+ * 2 0x04 reduced filtering firmware does less filtering on
+ * position data, driver should watch
+ * for noise.
+ */
#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
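The new comment documents query 0x0c byte by byte; the macros below it assume the three response bytes are packed MSB-first into a 24-bit word (byte 1 in bits 23:16, byte 2 in bits 15:8), which is consistent with SYN_CAP_CLICKPAD testing 0x100000 for byte 1 mask 0x10. A short check of that packing, with the packing order itself treated as the stated assumption:

#include <stdio.h>

/* Same bit tests as synaptics.h. */
#define SYN_CAP_CLICKPAD(ex0c)       ((ex0c) & 0x100000)	/* byte 1, mask 0x10 */
#define SYN_CAP_CLICKPAD2BTN(ex0c)   ((ex0c) & 0x000100)	/* byte 2, mask 0x01 */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)	/* byte 1, mask 0x02 */

/* Assumed packing: response bytes 1..3 -> bits 23:16, 15:8, 7:0. */
static unsigned int pack_0x0c(unsigned char b1, unsigned char b2,
			      unsigned char b3)
{
	return ((unsigned int)b1 << 16) | ((unsigned int)b2 << 8) | b3;
}

int main(void)
{
	/* byte 1 = 0x12: report-max + clickpad bit 0; byte 2 = 0x01: clickpad bit 1 */
	unsigned int ex0c = pack_0x0c(0x12, 0x01, 0x00);

	printf("1-button clickpad: %s\n", SYN_CAP_CLICKPAD(ex0c) ? "yes" : "no");
	printf("2-button clickpad: %s\n", SYN_CAP_CLICKPAD2BTN(ex0c) ? "yes" : "no");
	printf("max-coordinate query supported: %s\n",
	       SYN_CAP_MAX_DIMENSIONS(ex0c) ? "yes" : "no");
	return 0;
}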
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 7c38d1f..ba70058 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -299,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
event->owner = owner;
list_add_tail(&event->node, &serio_event_list);
- schedule_work(&serio_event_work);
+ queue_work(system_long_wq, &serio_event_work);
out:
spin_unlock_irqrestore(&serio_event_lock, flags);
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index c8c136c..4303149 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -43,7 +43,6 @@ struct tps6507x_ts {
struct input_dev *input_dev;
struct device *dev;
char phys[32];
- struct workqueue_struct *wq;
struct delayed_work work;
unsigned polling; /* polling is active */
struct ts_event tc;
@@ -220,8 +219,8 @@ done:
poll = 1;
if (poll) {
- schd = queue_delayed_work(tsc->wq, &tsc->work,
- msecs_to_jiffies(tsc->poll_period));
+ schd = schedule_delayed_work(&tsc->work,
+ msecs_to_jiffies(tsc->poll_period));
if (schd)
tsc->polling = 1;
else {
@@ -303,7 +302,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
tsc->input_dev = input_dev;
INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
- tsc->wq = create_workqueue("TPS6507x Touchscreen");
if (init_data) {
tsc->poll_period = init_data->poll_period;
@@ -325,8 +323,8 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
if (error)
goto err2;
- schd = queue_delayed_work(tsc->wq, &tsc->work,
- msecs_to_jiffies(tsc->poll_period));
+ schd = schedule_delayed_work(&tsc->work,
+ msecs_to_jiffies(tsc->poll_period));
if (schd)
tsc->polling = 1;
@@ -341,7 +339,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
err2:
cancel_delayed_work_sync(&tsc->work);
- destroy_workqueue(tsc->wq);
input_free_device(input_dev);
err1:
kfree(tsc);
@@ -357,7 +354,6 @@ static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
struct input_dev *input_dev = tsc->input_dev;
cancel_delayed_work_sync(&tsc->work);
- destroy_workqueue(tsc->wq);
input_unregister_device(input_dev);
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
index 18f8798..7bd5baa 100644
--- a/drivers/isdn/hardware/eicon/istream.c
+++ b/drivers/isdn/hardware/eicon/istream.c
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
stream interface.
If synchronous service was requested, then function
does return amount of data written to stream.
- 'final' does indicate that pice of data to be written is
+ 'final' does indicate that piece of data to be written is
final part of frame (necessary only by structured datatransfer)
return 0 if zero lengh packet was written
return -1 if stream is full
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 0858791..cfff0c4 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@ static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
- struct sk_buff *skb, *oskb;
+ struct sk_buff *skb;
struct Layer2 *l2 = &st->l2;
u_char header[MAX_HEADER_LEN];
- int i;
+ int i, hdr_space_needed;
int unsigned p1;
u_long flags;
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
if (!skb)
return;
+ hdr_space_needed = l2headersize(l2, 0);
+ if (hdr_space_needed > skb_headroom(skb)) {
+ struct sk_buff *orig_skb = skb;
+
+ skb = skb_realloc_headroom(skb, hdr_space_needed);
+ if (!skb) {
+ dev_kfree_skb(orig_skb);
+ return;
+ }
+ }
spin_lock_irqsave(&l2->lock, flags);
if(test_bit(FLG_MOD128, &l2->flag))
p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
l2->vs = (l2->vs + 1) % 8;
}
spin_unlock_irqrestore(&l2->lock, flags);
- p1 = skb->data - skb->head;
- if (p1 >= i)
- memcpy(skb_push(skb, i), header, i);
- else {
- printk(KERN_WARNING
- "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
- oskb = skb;
- skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
- memcpy(skb_put(skb, i), header, i);
- skb_copy_from_linear_data(oskb,
- skb_put(skb, oskb->len), oskb->len);
- dev_kfree_skb(oskb);
- }
+ memcpy(skb_push(skb, i), header, i);
st->l2.l2l1(st, PH_PULL | INDICATION, skb);
test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
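The l2_pull_iqueue() rework guarantees sufficient headroom once, up front, and then unconditionally prepends the layer-2 header, instead of detecting the shortfall afterwards and copying into a fresh skb. A generic sketch of that ensure-headroom-then-prepend pattern on a plain byte buffer; the buffer type and helpers here are made up for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy packet buffer: payload lives at buf + data_off, headroom is data_off. */
struct pkt {
	unsigned char *buf;
	size_t data_off;	/* free space in front of the payload */
	size_t len;		/* payload length */
};

/* Reallocate with at least 'need' bytes of headroom, in the spirit of
 * skb_realloc_headroom(); returns 0 on success, -1 on allocation failure
 * (the caller then drops the packet). */
static int ensure_headroom(struct pkt *p, size_t need)
{
	if (p->data_off >= need)
		return 0;

	unsigned char *nbuf = malloc(need + p->len);
	if (!nbuf)
		return -1;
	memcpy(nbuf + need, p->buf + p->data_off, p->len);
	free(p->buf);
	p->buf = nbuf;
	p->data_off = need;
	return 0;
}

/* Prepend 'hlen' header bytes; only valid once headroom is guaranteed. */
static void push_header(struct pkt *p, const unsigned char *hdr, size_t hlen)
{
	p->data_off -= hlen;
	memcpy(p->buf + p->data_off, hdr, hlen);
	p->len += hlen;
}

int main(void)
{
	unsigned char payload[] = "payload";
	struct pkt p = { .buf = malloc(sizeof(payload)), .data_off = 0,
			 .len = sizeof(payload) };
	memcpy(p.buf, payload, sizeof(payload));

	unsigned char hdr[4] = { 0x01, 0x02, 0x03, 0x04 };
	if (ensure_headroom(&p, sizeof(hdr)) == 0) {
		push_header(&p, hdr, sizeof(hdr));
		printf("frame length with header: %zu\n", p.len);
	}
	free(p.buf);
	return 0;
}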
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767..0ed7f6b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ec..d5ad772 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
+ if (unit && MAJOR(unit) != MD_MAJOR)
+ unit &= ~((1<<MdpMinorShift)-1);
+
retry:
spin_lock(&all_mddevs_lock);
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
}
mddev->array_sectors = sectors;
- set_capacity(mddev->gendisk, mddev->array_sectors);
- if (mddev->pers)
+ if (mddev->pers) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
-
+ }
return len;
}
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
}
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
+ mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
+ mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
+ mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
- check_disk_size_change(mddev->gendisk, bdev);
+ check_disk_change(bdev);
out:
return err;
}
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
+
+static int md_media_changed(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ return mddev->changed;
+}
+
+static int md_revalidate(struct gendisk *disk)
+{
+ mddev_t *mddev = disk->private_data;
+
+ mddev->changed = 0;
+ return 0;
+}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
+ .media_changed = md_media_changed,
+ .revalidate_disk= md_revalidate,
};
static int md_thread(void * arg)
@@ -7338,7 +7361,7 @@ static int __init md_init(void)
{
int ret = -ENOMEM;
- md_wq = alloc_workqueue("md", WQ_RESCUER, 0);
+ md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7e90b85..12215d4 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -274,6 +274,8 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
+ int changed; /* True if we might need to
+ * reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf3..3a62d44 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
* bookkeeping area. [whatever we allocate in multipath_run(),
* should be freed in multipath_stop()]
*/
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a968..c0ac457 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
- mddev->queue->queue_lock = &mddev->queue->__queue_lock;
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
mddev->delta_disks = 1 - mddev->raid_disks;
+ mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa3..06cd712 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Only take the spinlock to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
- mddev->queue->queue_lock = &conf->device_lock;
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b2..747d061 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
+ /* Spinlock only taken to quiet a warning */
+ spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
+ spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
+ blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
if (!conf)
goto out;
- mddev->queue->queue_lock = &conf->device_lock;
-
mddev->thread = conf->thread;
conf->thread = NULL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7028128..78536fd 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
- mddev->queue->queue_lock = &conf->device_lock;
mddev->queue->unplug_fn = raid5_unplug_queue;
chunk_size = mddev->chunk_sectors << 9;
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index bc6a677..8c48521 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -658,13 +658,13 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props)
#define TDA8290_ID 0x89
u8 reg = 0x1f, id;
struct i2c_msg msg_read[] = {
- { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
- { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+ { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
};
/* detect tda8290 */
if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
- printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+ printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
__func__, reg);
return -ENODEV;
}
@@ -685,13 +685,13 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
#define TDA8295C2_ID 0x8b
u8 reg = 0x2f, id;
struct i2c_msg msg_read[] = {
- { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
- { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+ { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
+ { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
};
- /* detect tda8290 */
+ /* detect tda8295 */
if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
- printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+ printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
__func__, reg);
return -ENODEV;
}
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index defd839..193cdb7 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -870,6 +870,23 @@ static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap)
return 0;
}
+static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index,
+ u16 pid, int onoff)
+{
+ struct dib0700_state *st = adapter->dev->priv;
+ if (st->is_dib7000pc)
+ return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
+ return dib7000m_pid_filter(adapter->fe, index, pid, onoff);
+}
+
+static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
+{
+ struct dib0700_state *st = adapter->dev->priv;
+ if (st->is_dib7000pc)
+ return dib7000p_pid_filter_ctrl(adapter->fe, onoff);
+ return dib7000m_pid_filter_ctrl(adapter->fe, onoff);
+}
+
static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
{
return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
@@ -1875,8 +1892,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
{
.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
.pid_filter_count = 32,
- .pid_filter = stk70x0p_pid_filter,
- .pid_filter_ctrl = stk70x0p_pid_filter_ctrl,
+ .pid_filter = stk7700p_pid_filter,
+ .pid_filter_ctrl = stk7700p_pid_filter_ctrl,
.frontend_attach = stk7700p_frontend_attach,
.tuner_attach = stk7700p_tuner_attach,
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index 9eea418..46ccd01 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -659,7 +659,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
}
/* Default firmware for LME2510C */
-const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
+char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
static void lme_coldreset(struct usb_device *dev)
{
@@ -1006,7 +1006,7 @@ static struct dvb_usb_device_properties lme2510c_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.download_firmware = lme2510_download_firmware,
- .firmware = lme_firmware,
+ .firmware = (const char *)&lme_firmware,
.size_of_priv = sizeof(struct lme2510_state),
.num_adapters = 1,
.adapter = {
@@ -1109,5 +1109,5 @@ module_exit(lme2510_module_exit);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.74");
+MODULE_VERSION("1.75");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index c7f5ccf..289a798 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -1285,6 +1285,25 @@ struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum di
}
EXPORT_SYMBOL(dib7000m_get_i2c_master);
+int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
+{
+ struct dib7000m_state *state = fe->demodulator_priv;
+ u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
+ val |= (onoff & 0x1) << 4;
+ dprintk("PID filter enabled %d", onoff);
+ return dib7000m_write_word(state, 294 + state->reg_offs, val);
+}
+EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
+
+int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
+{
+ struct dib7000m_state *state = fe->demodulator_priv;
+ dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
+ return dib7000m_write_word(state, 300 + state->reg_offs + id,
+ onoff ? (1 << 13) | pid : 0);
+}
+EXPORT_SYMBOL(dib7000m_pid_filter);
+
#if 0
/* used with some prototype boards */
int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
diff --git a/drivers/media/dvb/frontends/dib7000m.h b/drivers/media/dvb/frontends/dib7000m.h
index 113819c..81fcf22 100644
--- a/drivers/media/dvb/frontends/dib7000m.h
+++ b/drivers/media/dvb/frontends/dib7000m.h
@@ -46,6 +46,8 @@ extern struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *,
enum dibx000_i2c_interface,
int);
+extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
+extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
#else
static inline
struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
@@ -63,6 +65,19 @@ struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *demod,
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
+static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id,
+ u16 pid, u8 onoff)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
+
+static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe,
+ uint8_t onoff)
+{
+ printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
+ return -ENODEV;
+}
#endif
/* TODO
diff --git a/drivers/media/dvb/mantis/mantis_pci.c b/drivers/media/dvb/mantis/mantis_pci.c
index 59feeb8..10a432a 100644
--- a/drivers/media/dvb/mantis/mantis_pci.c
+++ b/drivers/media/dvb/mantis/mantis_pci.c
@@ -22,7 +22,6 @@
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 73230ff..01f258a 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -112,7 +112,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
ktime_t now;
s64 delta; /* ns */
- struct ir_raw_event ev;
+ DEFINE_IR_RAW_EVENT(ev);
int rc = 0;
if (!dev->raw)
@@ -125,7 +125,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
* being called for the first time, note that delta can't
* possibly be negative.
*/
- ev.duration = 0;
if (delta > IR_MAX_DURATION || !dev->raw->last_type)
type |= IR_START_EVENT;
else
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 6df0a49..e4f8eac 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -148,6 +148,7 @@ enum mceusb_model_type {
MCE_GEN2_TX_INV,
POLARIS_EVK,
CX_HYBRID_TV,
+ MULTIFUNCTION,
};
struct mceusb_model {
@@ -155,9 +156,10 @@ struct mceusb_model {
u32 mce_gen2:1;
u32 mce_gen3:1;
u32 tx_mask_normal:1;
- u32 is_polaris:1;
u32 no_tx:1;
+ int ir_intfnum;
+
const char *rc_map; /* Allow specify a per-board map */
const char *name; /* per-board name */
};
@@ -179,7 +181,6 @@ static const struct mceusb_model mceusb_model[] = {
.tx_mask_normal = 1,
},
[POLARIS_EVK] = {
- .is_polaris = 1,
/*
* In fact, the EVK is shipped without
* remotes, but we should have something handy,
@@ -189,10 +190,13 @@ static const struct mceusb_model mceusb_model[] = {
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
},
[CX_HYBRID_TV] = {
- .is_polaris = 1,
.no_tx = 1, /* tx isn't wired up at all */
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
},
+ [MULTIFUNCTION] = {
+ .mce_gen2 = 1,
+ .ir_intfnum = 2,
+ },
};
static struct usb_device_id mceusb_dev_table[] = {
@@ -216,8 +220,9 @@ static struct usb_device_id mceusb_dev_table[] = {
{ USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
/* Philips/Spinel plus IR transceiver for ASUS */
{ USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
- /* Realtek MCE IR Receiver */
- { USB_DEVICE(VENDOR_REALTEK, 0x0161) },
+ /* Realtek MCE IR Receiver and card reader */
+ { USB_DEVICE(VENDOR_REALTEK, 0x0161),
+ .driver_info = MULTIFUNCTION },
/* SMK/Toshiba G83C0004D410 */
{ USB_DEVICE(VENDOR_SMK, 0x031d),
.driver_info = MCE_GEN2_TX_INV },
@@ -1101,7 +1106,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
bool is_gen3;
bool is_microsoft_gen1;
bool tx_mask_normal;
- bool is_polaris;
+ int ir_intfnum;
dev_dbg(&intf->dev, "%s called\n", __func__);
@@ -1110,13 +1115,11 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
is_gen3 = mceusb_model[model].mce_gen3;
is_microsoft_gen1 = mceusb_model[model].mce_gen1;
tx_mask_normal = mceusb_model[model].tx_mask_normal;
- is_polaris = mceusb_model[model].is_polaris;
+ ir_intfnum = mceusb_model[model].ir_intfnum;
- if (is_polaris) {
- /* Interface 0 is IR */
- if (idesc->desc.bInterfaceNumber)
- return -ENODEV;
- }
+ /* There are multi-function devices with non-IR interfaces */
+ if (idesc->desc.bInterfaceNumber != ir_intfnum)
+ return -ENODEV;
/* step through the endpoints to find first bulk in and out endpoint */
for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 273d9d6..d4d6449 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -385,8 +385,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt)
static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
- /* set number of bytes needed for wake key comparison (default 67) */
- nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP);
+ /* set number of bytes needed for wake from s3 (default 65) */
+ nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
+ CIR_WAKE_FIFO_CMP_DEEP);
/* set tolerance/variance allowed per byte during wake compare */
nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 1df8235..048135e 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -305,8 +305,11 @@ struct nvt_dev {
#define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20
#define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10
-/* CIR Wake FIFO buffer is 67 bytes long */
-#define CIR_WAKE_FIFO_LEN 67
+/*
+ * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes
+ * the system comparing only 65 bytes (fails with this set to 67)
+ */
+#define CIR_WAKE_FIFO_CMP_BYTES 65
/* CIR Wake byte comparison tolerance */
#define CIR_WAKE_CMP_TOLERANCE 5
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 512a2f4..5b4422e 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -850,7 +850,7 @@ static ssize_t store_protocols(struct device *device,
count++;
} else {
for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
- if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) {
+ if (!strcasecmp(tmp, proto_names[i].name)) {
tmp += strlen(proto_names[i].name);
mask = proto_names[i].type;
break;
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index e41e4ad..9c475c6 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -1758,7 +1758,12 @@ static int vidioc_reqbufs(struct file *file, void *priv,
if (rc < 0)
return rc;
- return videobuf_reqbufs(&fh->vb_vidq, rb);
+ if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ rc = videobuf_reqbufs(&fh->vb_vidq, rb);
+ else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+ rc = videobuf_reqbufs(&fh->vb_vbiq, rb);
+
+ return rc;
}
static int vidioc_querybuf(struct file *file, void *priv,
@@ -1772,7 +1777,12 @@ static int vidioc_querybuf(struct file *file, void *priv,
if (rc < 0)
return rc;
- return videobuf_querybuf(&fh->vb_vidq, b);
+ if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ rc = videobuf_querybuf(&fh->vb_vidq, b);
+ else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+ rc = videobuf_querybuf(&fh->vb_vbiq, b);
+
+ return rc;
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1785,7 +1795,12 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
if (rc < 0)
return rc;
- return videobuf_qbuf(&fh->vb_vidq, b);
+ if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ rc = videobuf_qbuf(&fh->vb_vidq, b);
+ else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+ rc = videobuf_qbuf(&fh->vb_vbiq, b);
+
+ return rc;
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1806,7 +1821,12 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
dev->greenscreen_detected = 0;
}
- return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
+ if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
+ else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
+ rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK);
+
+ return rc;
}
static struct v4l2_file_operations au0828_v4l_fops = {
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 8717773..68ad196 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -95,6 +95,53 @@ static const struct cx18_card cx18_card_hvr1600_esmt = {
.i2c = &cx18_i2c_std,
};
+static const struct cx18_card cx18_card_hvr1600_s5h1411 = {
+ .type = CX18_CARD_HVR_1600_S5H1411,
+ .name = "Hauppauge HVR-1600",
+ .comment = "Simultaneous Digital and Analog TV capture supported\n",
+ .v4l2_capabilities = CX18_CAP_ENCODER,
+ .hw_audio_ctrl = CX18_HW_418_AV,
+ .hw_muxer = CX18_HW_CS5345,
+ .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
+ CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL |
+ CX18_HW_Z8F0811_IR_HAUP,
+ .video_inputs = {
+ { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 },
+ { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 },
+ { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 },
+ { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 },
+ { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 },
+ },
+ .audio_inputs = {
+ { CX18_CARD_INPUT_AUD_TUNER,
+ CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
+ { CX18_CARD_INPUT_LINE_IN1,
+ CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 },
+ { CX18_CARD_INPUT_LINE_IN2,
+ CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 },
+ },
+ .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
+ CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 },
+ .ddr = {
+ /* ESMT M13S128324A-5B memory */
+ .chip_config = 0x003,
+ .refresh = 0x30c,
+ .timing1 = 0x44220e82,
+ .timing2 = 0x08,
+ .tune_lane = 0,
+ .initial_emrs = 0,
+ },
+ .gpio_init.initial_value = 0x3001,
+ .gpio_init.direction = 0x3001,
+ .gpio_i2c_slave_reset = {
+ .active_lo_mask = 0x3001,
+ .msecs_asserted = 10,
+ .msecs_recovery = 40,
+ .ir_reset_mask = 0x0001,
+ },
+ .i2c = &cx18_i2c_std,
+};
+
static const struct cx18_card cx18_card_hvr1600_samsung = {
.type = CX18_CARD_HVR_1600_SAMSUNG,
.name = "Hauppauge HVR-1600 (Preproduction)",
@@ -523,7 +570,8 @@ static const struct cx18_card *cx18_card_list[] = {
&cx18_card_toshiba_qosmio_dvbt,
&cx18_card_leadtek_pvr2100,
&cx18_card_leadtek_dvr3100h,
- &cx18_card_gotview_dvd3
+ &cx18_card_gotview_dvd3,
+ &cx18_card_hvr1600_s5h1411
};
const struct cx18_card *cx18_get_card(u16 index)
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 944af8a..b1c3cbd 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -157,6 +157,7 @@ MODULE_PARM_DESC(cardtype,
"\t\t\t 7 = Leadtek WinFast PVR2100\n"
"\t\t\t 8 = Leadtek WinFast DVR3100 H\n"
"\t\t\t 9 = GoTView PCI DVD3 Hybrid\n"
+ "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n"
"\t\t\t 0 = Autodetect (default)\n"
"\t\t\t-1 = Ignore this card\n\t\t");
MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
@@ -337,6 +338,7 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
switch (cx->card->type) {
case CX18_CARD_HVR_1600_ESMT:
case CX18_CARD_HVR_1600_SAMSUNG:
+ case CX18_CARD_HVR_1600_S5H1411:
tveeprom_hauppauge_analog(&c, tv, eedata);
break;
case CX18_CARD_YUAN_MPC718:
@@ -365,7 +367,25 @@ static void cx18_process_eeprom(struct cx18 *cx)
from the model number. Use the cardtype module option if you
have one of these preproduction models. */
switch (tv.model) {
- case 74000 ... 74999:
+ case 74301: /* Retail models */
+ case 74321:
+ case 74351: /* OEM models */
+ case 74361:
+ /* Digital side is s5h1411/tda18271 */
+ cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411);
+ break;
+ case 74021: /* Retail models */
+ case 74031:
+ case 74041:
+ case 74141:
+ case 74541: /* OEM models */
+ case 74551:
+ case 74591:
+ case 74651:
+ case 74691:
+ case 74751:
+ case 74891:
+ /* Digital side is s5h1409/mxl5005s */
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
break;
case 0x718:
@@ -377,7 +397,8 @@ static void cx18_process_eeprom(struct cx18 *cx)
CX18_ERR("Invalid EEPROM\n");
return;
default:
- CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model);
+ CX18_ERR("Unknown model %d, defaulting to original HVR-1600 "
+ "(cardtype=1)\n", tv.model);
cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
break;
}
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 306caac..f736679 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -85,7 +85,8 @@
#define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */
#define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */
#define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */
-#define CX18_CARD_LAST 8
+#define CX18_CARD_HVR_1600_S5H1411 9 /* Hauppauge HVR 1600 s5h1411/tda18271*/
+#define CX18_CARD_LAST 9
#define CX18_ENC_STREAM_TYPE_MPG 0
#define CX18_ENC_STREAM_TYPE_TS 1
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index f0381d6..f41922b 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -29,6 +29,8 @@
#include "cx18-gpio.h"
#include "s5h1409.h"
#include "mxl5005s.h"
+#include "s5h1411.h"
+#include "tda18271.h"
#include "zl10353.h"
#include <linux/firmware.h>
@@ -77,6 +79,32 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
};
/*
+ * CX18_CARD_HVR_1600_S5H1411
+ */
+static struct s5h1411_config hcw_s5h1411_config = {
+ .output_mode = S5H1411_SERIAL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .vsb_if = S5H1411_IF_44000,
+ .qam_if = S5H1411_IF_4000,
+ .inversion = S5H1411_INVERSION_ON,
+ .status_mode = S5H1411_DEMODLOCKING,
+ .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
+};
+
+static struct tda18271_std_map hauppauge_tda18271_std_map = {
+ .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3,
+ .if_lvl = 6, .rfagc_top = 0x37 },
+ .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0,
+ .if_lvl = 6, .rfagc_top = 0x37 },
+};
+
+static struct tda18271_config hauppauge_tda18271_config = {
+ .std_map = &hauppauge_tda18271_std_map,
+ .gate = TDA18271_GATE_DIGITAL,
+ .output_opt = TDA18271_OUTPUT_LT_OFF,
+};
+
+/*
* CX18_CARD_LEADTEK_DVR3100H
*/
/* Information/confirmation of proper config values provided by Terry Wu */
@@ -244,6 +272,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
switch (cx->card->type) {
case CX18_CARD_HVR_1600_ESMT:
case CX18_CARD_HVR_1600_SAMSUNG:
+ case CX18_CARD_HVR_1600_S5H1411:
v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
v |= 0x00400000; /* Serial Mode */
v |= 0x00002000; /* Data Length - Byte */
@@ -455,6 +484,15 @@ static int dvb_register(struct cx18_stream *stream)
ret = 0;
}
break;
+ case CX18_CARD_HVR_1600_S5H1411:
+ dvb->fe = dvb_attach(s5h1411_attach,
+ &hcw_s5h1411_config,
+ &cx->i2c_adap[0]);
+ if (dvb->fe != NULL)
+ dvb_attach(tda18271_attach, dvb->fe,
+ 0x60, &cx->i2c_adap[0],
+ &hauppauge_tda18271_config);
+ break;
case CX18_CARD_LEADTEK_DVR3100H:
dvb->fe = dvb_attach(zl10353_attach,
&leadtek_dvr3100h_demod,
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index ed3d8f5..307ff54 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -122,10 +122,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
if (!i2c_wait_done(i2c_adap))
goto eio;
- if (!i2c_slave_did_ack(i2c_adap)) {
- retval = -ENXIO;
- goto err;
- }
if (i2c_debug) {
printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
if (!(ctrl & I2C_NOSTOP))
@@ -158,7 +154,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
eio:
retval = -EIO;
- err:
if (i2c_debug)
printk(KERN_ERR " ERR: %d\n", retval);
return retval;
@@ -209,10 +204,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
if (!i2c_wait_done(i2c_adap))
goto eio;
- if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) {
- retval = -ENXIO;
- goto err;
- }
msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff;
if (i2c_debug) {
dprintk(1, " %02x", msg->buf[cnt]);
@@ -224,7 +215,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
eio:
retval = -EIO;
- err:
if (i2c_debug)
printk(KERN_ERR " ERR: %d\n", retval);
return retval;
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 6fc09dd..35796e0 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -2015,7 +2015,8 @@ static int cx25840_probe(struct i2c_client *client,
kfree(state);
return err;
}
- v4l2_ctrl_cluster(2, &state->volume);
+ if (!is_cx2583x(state))
+ v4l2_ctrl_cluster(2, &state->volume);
v4l2_ctrl_handler_setup(&state->hdl);
if (client->dev.platform_data) {
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 9b4faf0..9c29e96 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -628,22 +628,66 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
static void ivtv_irq_dma_err(struct ivtv *itv)
{
u32 data[CX2341X_MBOX_MAX_DATA];
+ u32 status;
del_timer(&itv->dma_timer);
+
ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
+ status = read_reg(IVTV_REG_DMASTATUS);
IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
- read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
- write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
+ status, itv->cur_dma_stream);
+ /*
+ * We do *not* write back to the IVTV_REG_DMASTATUS register to
+ * clear the error status, if either the encoder write (0x02) or
+ * decoder read (0x01) bus master DMA operation do not indicate
+ * completed. We can race with the DMA engine, which may have
+ * transitioned to completed status *after* we read the register.
+ * Setting an IVTV_REG_DMASTATUS flag back to "busy" status, after the
+ * DMA engine has completed, will cause the DMA engine to stop working.
+ */
+ status &= 0x3;
+ if (status == 0x3)
+ write_reg(status, IVTV_REG_DMASTATUS);
+
if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
- /* retry */
- if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
+ if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+ /* retry */
+ /*
+ * FIXME - handle cases of DMA error similar to
+ * encoder below, except conditioned on status & 0x1
+ */
ivtv_dma_dec_start(s);
- else
- ivtv_dma_enc_start(s);
- return;
+ return;
+ } else {
+ if ((status & 0x2) == 0) {
+ /*
+ * CX2341x Bus Master DMA write is ongoing.
+ * Reset the timer and let it complete.
+ */
+ itv->dma_timer.expires =
+ jiffies + msecs_to_jiffies(600);
+ add_timer(&itv->dma_timer);
+ return;
+ }
+
+ if (itv->dma_retries < 3) {
+ /*
+ * CX2341x Bus Master DMA write has ended.
+ * Retry the write, starting with the first
+ * xfer segment. Just retrying the current
+ * segment is not sufficient.
+ */
+ s->sg_processed = 0;
+ itv->dma_retries++;
+ ivtv_dma_enc_start_xfer(s);
+ return;
+ }
+ /* Too many retries, give up on this one */
+ }
+
}
if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
ivtv_udma_start(itv);
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index c179041..e7e7178 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -1011,7 +1011,6 @@ static int m2mtest_remove(struct platform_device *pdev)
v4l2_m2m_release(dev->m2m_dev);
del_timer_sync(&dev->timer);
video_unregister_device(dev->vfd);
- video_device_release(dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index b63f8ca..561909b 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -57,7 +57,7 @@
#include <linux/usb.h>
#define S2255_MAJOR_VERSION 1
-#define S2255_MINOR_VERSION 20
+#define S2255_MINOR_VERSION 21
#define S2255_RELEASE 0
#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
S2255_MINOR_VERSION, \
@@ -312,9 +312,9 @@ struct s2255_fh {
};
/* current cypress EEPROM firmware version */
-#define S2255_CUR_USB_FWVER ((3 << 8) | 6)
+#define S2255_CUR_USB_FWVER ((3 << 8) | 11)
/* current DSP FW version */
-#define S2255_CUR_DSP_FWVER 8
+#define S2255_CUR_DSP_FWVER 10102
/* Need DSP version 5+ for video status feature */
#define S2255_MIN_DSP_STATUS 5
#define S2255_MIN_DSP_COLORFILTER 8
@@ -492,9 +492,11 @@ static void planar422p_to_yuv_packed(const unsigned char *in,
static void s2255_reset_dsppower(struct s2255_dev *dev)
{
- s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1);
+ s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1);
msleep(10);
s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
+ msleep(600);
+ s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
return;
}
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index e9a3eab..8c1d85e 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -621,7 +621,7 @@ static int __init memstick_init(void)
{
int rc;
- workqueue = create_freezeable_workqueue("kmemstick");
+ workqueue = create_freezable_workqueue("kmemstick");
if (!workqueue)
return -ENOMEM;
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f229..1735c84 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
#endif
-#define MPT_LINUX_VERSION_COMMON "3.04.17"
-#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17"
+#define MPT_LINUX_VERSION_COMMON "3.04.18"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"
#define show_mptmod_ver(s,ver) \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed..e8deb8e 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
}
static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+ fasync_helper(-1, filep, 0, &async_queue);
+ return 0;
+}
+
+static int
mptctl_fasync(int fd, struct file *filep, int mode)
{
MPT_ADAPTER *ioc;
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
.llseek = no_llseek,
.fasync = mptctl_fasync,
.unlocked_ioctl = mptctl_ioctl,
+ .release = mptctl_release,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_mpctl_ioctl,
#endif
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53..0d9b82a 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
}
out:
- printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
- ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+ printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+ ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+ SCpnt, SCpnt->serial_number);
return retval;
}
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
vdevice = SCpnt->device->hostdata;
if (!vdevice || !vdevice->vtarget) {
- retval = SUCCESS;
+ retval = 0;
goto out;
}
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index a0421ef..8a5b2d8 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -84,7 +84,8 @@ int i2o_driver_register(struct i2o_driver *drv)
osm_debug("Register driver %s\n", drv->name);
if (drv->event) {
- drv->event_queue = create_workqueue(drv->name);
+ drv->event_queue = alloc_workqueue(drv->name,
+ WQ_MEM_RECLAIM, 1);
if (!drv->event_queue) {
osm_err("Could not initialize event queue for driver "
"%s\n", drv->name);
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 6a1f940..c45e630 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
unsigned long flags;
struct asic3 *asic;
- desc->chip->ack(irq);
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
- asic = desc->handler_data;
+ asic = get_irq_data(irq);
for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
u32 status;
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 33c923d..fdd8a1b 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
/* Voice codec interface client */
cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
- cell->name = "davinci_vcif";
+ cell->name = "davinci-vcif";
cell->driver_data = davinci_vc;
/* Voice codec CQ93VC client */
cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
- cell->name = "cq93vc";
+ cell->name = "cq93vc-codec";
cell->driver_data = davinci_vc;
ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 627cf57..e9018d1 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
static inline int __tps6586x_writes(struct i2c_client *client, int reg,
int len, uint8_t *val)
{
- int ret;
+ int ret, i;
- ret = i2c_smbus_write_i2c_block_data(client, reg, len, val);
- if (ret < 0) {
- dev_err(&client->dev, "failed writings to 0x%02x\n", reg);
- return ret;
+ for (i = 0; i < len; i++) {
+ ret = __tps6586x_write(client, reg + i, *(val + i));
+ if (ret < 0)
+ return ret;
}
return 0;
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 000cb41..92b85e2 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
idev->close = ucb1x00_ts_close;
__set_bit(EV_ABS, idev->evbit);
- __set_bit(ABS_X, idev->absbit);
- __set_bit(ABS_Y, idev->absbit);
- __set_bit(ABS_PRESSURE, idev->absbit);
input_set_drvdata(idev, ts);
+ ucb1x00_adc_enable(ts->ucb);
+ ts->x_res = ucb1x00_ts_read_xres(ts);
+ ts->y_res = ucb1x00_ts_read_yres(ts);
+ ucb1x00_adc_disable(ts->ucb);
+
+ input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
+ input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
+ input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
+
err = input_register_device(idev);
if (err)
goto fail;
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 41233c7..f4016a0 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev)
struct wm8994 *wm8994 = dev_get_drvdata(dev);
int ret;
+ /* Don't actually go through with the suspend if the CODEC is
+ * still active (e.g., for audio passthrough from the CP). */
+ ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read power status: %d\n", ret);
+ } else if (ret & WM8994_VMID_SEL_MASK) {
+ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+ return 0;
+ }
+
/* GPIO configuration state is saved here since we may be configuring
* the GPIO alternate functions even if we're not using the gpiolib
* driver for them.
@@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to save LDO registers: %d\n", ret);
+ wm8994->suspended = true;
+
ret = regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
@@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev)
struct wm8994 *wm8994 = dev_get_drvdata(dev);
int ret;
+ /* We may have lied to the PM core about suspending */
+ if (!wm8994->suspended)
+ return 0;
+
ret = regulator_bulk_enable(wm8994->num_supplies,
wm8994->supplies);
if (ret != 0) {
@@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev)
if (ret < 0)
dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
+ wm8994->suspended = false;
+
return 0;
}
#endif
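The suspend/resume pair above hinges on the wm8994->suspended flag: a suspend that is skipped because the CODEC is still active must be matched by a resume that does nothing. A minimal, self-contained sketch of that pattern follows; the helpers are hypothetical stand-ins for the WM8994 register read and regulator calls, not the driver's code.

#include <stdbool.h>
#include <stdio.h>

static bool codec_active = true;	/* example state, stands in for the VMID_SEL check */
static bool suspended;

static void power_down(void) { printf("powered down\n"); }
static void power_up(void)   { printf("powered up\n"); }

static int example_suspend(void)
{
	if (codec_active)
		return 0;		/* still in use: report success but stay powered */
	power_down();
	suspended = true;
	return 0;
}

static int example_resume(void)
{
	if (!suspended)
		return 0;		/* nothing was powered down, nothing to undo */
	power_up();
	suspended = false;
	return 0;
}

int main(void)
{
	example_suspend();		/* codec busy: no power_down() happens */
	example_resume();		/* no-op, because suspended is still false */
	return 0;
}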
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 63ee4c1..b6e1c9a 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = {
{ "bmp085", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, bmp085_id);
static struct i2c_driver bmp085_driver = {
.driver = {
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
index 740ff07..620973e 100644
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -183,9 +183,7 @@ struct iwmct_priv {
u32 barker;
struct iwmct_dbg dbg;
- /* drivers work queue */
- struct workqueue_struct *wq;
- struct workqueue_struct *bus_rescan_wq;
+ /* drivers work items */
struct work_struct bus_rescan_worker;
struct work_struct isr_worker;
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index c73cef2..727af07 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -89,7 +89,7 @@ static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
switch (msg->hdr.opcode) {
case OP_OPR_ALIVE:
LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
- queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
+ schedule_work(&priv->bus_rescan_worker);
break;
default:
LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
@@ -360,7 +360,7 @@ static void iwmct_irq(struct sdio_func *func)
/* clear the function's interrupt request bit (write 1 to clear) */
sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
- queue_work(priv->wq, &priv->isr_worker);
+ schedule_work(&priv->isr_worker);
LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
@@ -506,10 +506,6 @@ static int iwmct_probe(struct sdio_func *func,
priv->func = func;
sdio_set_drvdata(func, priv);
-
- /* create drivers work queue */
- priv->wq = create_workqueue(DRV_NAME "_wq");
- priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
@@ -604,9 +600,9 @@ static void iwmct_remove(struct sdio_func *func)
sdio_release_irq(func);
sdio_release_host(func);
- /* Safely destroy osc workqueue */
- destroy_workqueue(priv->bus_rescan_wq);
- destroy_workqueue(priv->wq);
+ /* Make sure works are finished */
+ flush_work_sync(&priv->bus_rescan_worker);
+ flush_work_sync(&priv->isr_worker);
sdio_claim_host(func);
sdio_disable_func(func);
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852d..44d4475 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
{
int rc;
- workqueue = create_freezeable_workqueue("tifm");
+ workqueue = create_freezable_workqueue("tifm");
if (!workqueue)
return -ENOMEM;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 4d2ea8e..6df5a55 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void)
if (x86_hyper != &x86_hyper_vmware)
return -ENODEV;
- vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+ vmballoon_wq = create_freezable_workqueue("vmmemctl");
if (!vmballoon_wq) {
pr_err("failed to create workqueue\n");
return -ENOMEM;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 6625c05..150b5f3 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1529,7 +1529,7 @@ void mmc_rescan(struct work_struct *work)
* still present
*/
if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
- && mmc_card_is_removable(host))
+ && !(host->caps & MMC_CAP_NONREMOVABLE))
host->bus_ops->detect(host);
/*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 5c4a54d..ebc62ad 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -792,7 +792,6 @@ int mmc_attach_sdio(struct mmc_host *host)
*/
mmc_release_host(host);
err = mmc_add_card(host->card);
- mmc_claim_host(host);
if (err)
goto remove_added;
@@ -805,12 +804,12 @@ int mmc_attach_sdio(struct mmc_host *host)
goto remove_added;
}
+ mmc_claim_host(host);
return 0;
remove_added:
/* Remove without lock if the device has been added. */
- mmc_release_host(host);
mmc_sdio_remove(host);
mmc_claim_host(host);
remove:
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index fd877f6..2f7fc0c 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1516,21 +1516,17 @@ static int __devexit mmc_spi_remove(struct spi_device *spi)
return 0;
}
-#if defined(CONFIG_OF)
static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
{ .compatible = "mmc-spi-slot", },
{},
};
-#endif
static struct spi_driver mmc_spi_driver = {
.driver = {
.name = "mmc_spi",
.bus = &spi_bus_type,
.owner = THIS_MODULE,
-#if defined(CONFIG_OF)
.of_match_table = mmc_spi_of_match_table,
-#endif
},
.probe = mmc_spi_probe,
.remove = __devexit_p(mmc_spi_remove),
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index a8c3e1c..4aaa88f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1230,10 +1230,32 @@ static int inval_cache_and_wait_for_operation(
sleep_time = chip_op_time / 2;
for (;;) {
+ if (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ mutex_unlock(&chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ mutex_lock(&chip->mutex);
+ continue;
+ }
+
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
+ if (chip->erase_suspended && chip_state == FL_ERASING) {
+ /* Erase suspend occurred while sleeping: reset timeout */
+ timeo = reset_timeo;
+ chip->erase_suspended = 0;
+ }
+ if (chip->write_suspended && chip_state == FL_WRITING) {
+ /* Write suspend occurred while sleeping: reset timeout */
+ timeo = reset_timeo;
+ chip->write_suspended = 0;
+ }
if (!timeo) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
@@ -1257,27 +1279,6 @@ static int inval_cache_and_wait_for_operation(
timeo--;
}
mutex_lock(&chip->mutex);
-
- while (chip->state != chip_state) {
- /* Someone's suspended the operation: sleep */
- DECLARE_WAITQUEUE(wait, current);
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- mutex_unlock(&chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- mutex_lock(&chip->mutex);
- }
- if (chip->erase_suspended && chip_state == FL_ERASING) {
- /* Erase suspend occured while sleep: reset timeout */
- timeo = reset_timeo;
- chip->erase_suspended = 0;
- }
- if (chip->write_suspended && chip_state == FL_WRITING) {
- /* Write suspend occured while sleep: reset timeout */
- timeo = reset_timeo;
- chip->write_suspended = 0;
- }
}
/* Done and happy. */
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index d72a5fb..4e1be51 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1935,14 +1935,14 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
}
-static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
+static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
{
int i,num_erase_regions;
uint8_t uaddr;
- if (! (jedec_table[index].devtypes & p_cfi->device_type)) {
+ if (!(jedec_table[index].devtypes & cfi->device_type)) {
DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
- jedec_table[index].name, 4 * (1<<p_cfi->device_type));
+ jedec_table[index].name, 4 * (1<<cfi->device_type));
return 0;
}
@@ -1950,27 +1950,28 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
num_erase_regions = jedec_table[index].nr_regions;
- p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
- if (!p_cfi->cfiq) {
+ cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
+ if (!cfi->cfiq) {
//xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
return 0;
}
- memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
+ memset(cfi->cfiq, 0, sizeof(struct cfi_ident));
- p_cfi->cfiq->P_ID = jedec_table[index].cmd_set;
- p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
- p_cfi->cfiq->DevSize = jedec_table[index].dev_size;
- p_cfi->cfi_mode = CFI_MODE_JEDEC;
+ cfi->cfiq->P_ID = jedec_table[index].cmd_set;
+ cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
+ cfi->cfiq->DevSize = jedec_table[index].dev_size;
+ cfi->cfi_mode = CFI_MODE_JEDEC;
+ cfi->sector_erase_cmd = CMD(0x30);
for (i=0; i<num_erase_regions; i++){
- p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
+ cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
}
- p_cfi->cmdset_priv = NULL;
+ cfi->cmdset_priv = NULL;
/* This may be redundant for some cases, but it doesn't hurt */
- p_cfi->mfr = jedec_table[index].mfr_id;
- p_cfi->id = jedec_table[index].dev_id;
+ cfi->mfr = jedec_table[index].mfr_id;
+ cfi->id = jedec_table[index].dev_id;
uaddr = jedec_table[index].uaddr;
@@ -1978,8 +1979,8 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
our brains explode when we see the datasheets talking about address
lines numbered from A-1 to A18. The CFI table has unlock addresses
in device-words according to the mode the device is connected in */
- p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
- p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
+ cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type;
+ cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type;
return 1; /* ok */
}
@@ -2175,7 +2176,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
"MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
__func__, cfi->mfr, cfi->id,
cfi->addr_unlock1, cfi->addr_unlock2 );
- if (!cfi_jedec_setup(cfi, i))
+ if (!cfi_jedec_setup(map, cfi, i))
return 0;
goto ok_out;
}
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 77d64ce..92de7e3 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -151,6 +151,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
printk(KERN_ERR MOD_NAME
" %s(): Unable to register resource %pR - kernel bug?\n",
__func__, &window->rsrc);
+ return -EBUSY;
}
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index cb20c67..e0a2373 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -413,7 +413,6 @@ error3:
error2:
list_del(&new->list);
error1:
- kfree(new);
return ret;
}
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 15682ec..28af71c 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -968,6 +968,6 @@ static void __exit omap_nand_exit(void)
module_init(omap_nand_init);
module_exit(omap_nand_exit);
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efb..6322d1f 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
init_completion(&dev->dma_done);
- dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+ dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
if (!dev->card_workqueue)
goto error9;
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index e789149..ac08750 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -131,7 +131,7 @@ static struct platform_driver generic_onenand_driver = {
.remove = __devexit_p(generic_onenand_remove),
};
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
static int __init generic_onenand_init(void)
{
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index ac31f46..c849cac 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -860,7 +860,7 @@ static void __exit omap2_onenand_exit(void)
module_init(omap2_onenand_init);
module_exit(omap2_onenand_exit);
-MODULE_ALIAS(DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf..ac0d6a8 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
static __init int sm_module_init(void)
{
int error = 0;
- cache_flush_workqueue = create_freezeable_workqueue("smflush");
+ cache_flush_workqueue = create_freezable_workqueue("smflush");
if (IS_ERR(cache_flush_workqueue))
return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 39214e5..7ca0ede 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data)
int csr0, boguscnt;
int handled = 0;
- if (dev == NULL) {
- printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n");
- return IRQ_NONE;
- }
-
lance->RAP = CSR0; /* PCnet-ISA Controller Status */
if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 653c624..8849699 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,7 +22,7 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.62.00-5"
+#define DRV_MODULE_VERSION "1.62.00-6"
#define DRV_MODULE_RELDATE "2011/01/30"
#define BNX2X_BC_VER 0x040200
@@ -1211,6 +1211,7 @@ struct bnx2x {
/* DCBX Negotation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
+ u32 pending_max;
};
/**
@@ -1613,19 +1614,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_BTR 4
#define MAX_SPQ_PENDING 8
-
-/* CMNG constants
- derived from lab experiments, and not from system spec calculations !!! */
-#define DEF_MIN_RATE 100
-/* resolution of the rate shaping timer - 100 usec */
-#define RS_PERIODIC_TIMEOUT_USEC 100
-/* resolution of fairness algorithm in usecs -
- coefficient for calculating the actual t fair */
-#define T_FAIR_COEF 10000000
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE 100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC 400
/* number of bytes in single QM arbitration cycle -
- coefficient for calculating the fairness timer */
-#define QM_ARB_BYTES 40000
-#define FAIR_MEM 2
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES 160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES 100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH 32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm: 2 cycles */
+#define FAIR_MEM 2
#define ATTN_NIG_FOR_FUNC (1L << 8)
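For reference, with the constants above the fairness integration coefficient evaluates to T_FAIR_COEF = (MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES = (32768 + 160000) * 8 * 100 = 154,214,400.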
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d..a71b329 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
#endif
}
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ * nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN 12
+/**
+ * Calculate the approximate value of the MSS for this
+ * aggregation using the first packet of it.
+ *
+ * @param bp
+ * @param parsing_flags Parsing flags from the START CQE
+ * @param len_on_bd Total length of the first packet for the
+ * aggregation.
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+ u16 len_on_bd)
+{
+ /* TPA aggregation won't have IP options, nor TCP options
+ * other than timestamp.
+ */
+ u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+
+
+ /* Check if there was a TCP timestamp; if there was, it will
+ * always be 12 bytes long: nop nop kind length echo val.
+ *
+ * Otherwise FW would close the aggregation.
+ */
+ if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+ hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+ return len_on_bd - hdrs_len;
+}
+
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct sk_buff *skb,
struct eth_fast_path_rx_cqe *fp_cqe,
- u16 cqe_idx)
+ u16 cqe_idx, u16 parsing_flags)
{
struct sw_rx_page *rx_pg, old_rx_pg;
u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
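The MSS estimate above is simply the first packet's length on the BD minus the fixed Ethernet/IPv4/TCP header bytes, plus 12 more when the TCP timestamp option is present (two NOPs + kind + length + TSval + TSecr). A small stand-alone sketch of that arithmetic, using an example frame length (illustrative values only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int eth_hlen   = 14;	/* ETH_HLEN */
	unsigned int ip_hlen    = 20;	/* base IPv4 header, no options */
	unsigned int tcp_hlen   = 20;	/* base TCP header, no options */
	unsigned int tstamp_opt = 12;	/* TPA_TSTAMP_OPT_LEN: nop nop kind length echo val */
	unsigned int len_on_bd  = 1514;	/* example length of the aggregation's first packet */

	unsigned int hdrs_len = eth_hlen + ip_hlen + tcp_hlen + tstamp_opt;	/* 66 */

	printf("approximate MSS = %u\n", len_on_bd - hdrs_len);	/* prints 1448 */
	return 0;
}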
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
- skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
- max(frag_size, (u32)len_on_bd));
+ skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
+ len_on_bd);
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (likely(new_skb)) {
/* fix ip xsum and give it to the stack */
/* (no need to map the new skb) */
+ u16 parsing_flags =
+ le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
prefetch(skb);
prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
if (!bnx2x_fill_frag_skb(bp, fp, skb,
- &cqe->fast_path_cqe, cqe_idx)) {
- if ((le16_to_cpu(cqe->fast_path_cqe.
- pars_flags.flags) & PARSING_FLAGS_VLAN))
+ &cqe->fast_path_cqe, cqe_idx,
+ parsing_flags)) {
+ if (parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb,
le16_to_cpu(cqe->fast_path_cqe.
vlan_tag));
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
u16 line_speed = bp->link_vars.line_speed;
if (IS_MF(bp)) {
- u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- /* Calculate the current MAX line speed limit for the DCC
- * capable devices
+ u16 maxCfg = bnx2x_extract_max_cfg(bp,
+ bp->mf_config[BP_VN(bp)]);
+
+ /* Calculate the current MAX line speed limit for the MF
+ * devices
*/
- if (IS_MF_SD(bp)) {
+ if (IS_MF_SI(bp))
+ line_speed = (line_speed * maxCfg) / 100;
+ else { /* SD mode */
u16 vn_max_rate = maxCfg * 100;
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
- } else /* IS_MF_SI(bp)) */
- line_speed = (line_speed * maxCfg) / 100;
+ }
}
return line_speed;
@@ -959,6 +996,23 @@ void bnx2x_free_skbs(struct bnx2x *bp)
bnx2x_free_rx_skbs(bp);
}
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+ /* load old values */
+ u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+ if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+ /* leave all but MAX value */
+ mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+ /* set new MAX value */
+ mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+ & FUNC_MF_CFG_MAX_BW_MASK;
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+ }
+}
+
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
int i, offset = 1;
@@ -1427,6 +1481,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_set_eth_mac(bp, 1);
+ if (bp->pending_max) {
+ bnx2x_update_max_mf_config(bp, bp->pending_max);
+ bp->pending_max = 0;
+ }
+
if (bp->port.pmf)
bnx2x_initial_phy_init(bp, load_mode);
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d6..85ea7f2 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp);
*/
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+/**
+ * Updates MAX part of MF configuration in HW
+ * (if required)
+ *
+ * @param bp
+ * @param value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+
/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
@@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
+/**
+ * Extracts MAX BW part from MF configuration.
+ *
+ * @param bp
+ * @param mf_cfg
+ *
+ * @return u16
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+ u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (!max_cfg) {
+ BNX2X_ERR("Illegal configuration detected for Max BW - "
+ "using 100 instead\n");
+ max_cfg = 100;
+ }
+ return max_cfg;
+}
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b..7e92f9d 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
speed |= (cmd->speed_hi << 16);
if (IS_MF_SI(bp)) {
- u32 param = 0;
+ u32 part;
u32 line_speed = bp->link_vars.line_speed;
/* use 10G if no link detected */
@@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
REQ_BC_VER_4_SET_MF_BW);
return -EINVAL;
}
- if (line_speed < speed) {
- BNX2X_DEV_INFO("New speed should be less or equal "
- "to actual line speed\n");
+
+ part = (speed * 100) / line_speed;
+
+ if (line_speed < speed || !part) {
+ BNX2X_DEV_INFO("Speed setting should be in a range "
+ "from 1%% to 100%% "
+ "of actual line speed\n");
return -EINVAL;
}
- /* load old values */
- param = bp->mf_config[BP_VN(bp)];
-
- /* leave only MIN value */
- param &= FUNC_MF_CFG_MIN_BW_MASK;
- /* set new MAX value */
- param |= (((speed * 100) / line_speed)
- << FUNC_MF_CFG_MAX_BW_SHIFT)
- & FUNC_MF_CFG_MAX_BW_MASK;
+ if (bp->state != BNX2X_STATE_OPEN)
+ /* store value for following "load" */
+ bp->pending_max = part;
+ else
+ bnx2x_update_max_mf_config(bp, part);
- bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
return 0;
}
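In the rewritten branch above, "part" is the requested speed expressed as a percentage of the current line speed: for example, asking for 2500 on a 10000 line speed gives part = (2500 * 100) / 10000 = 25, which is either cached in bp->pending_max (when the device is not yet open) or written immediately via bnx2x_update_max_mf_config().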
@@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x100, 0x350 }, /* manuf_info */
{ 0x450, 0xf0 }, /* feature_info */
{ 0x640, 0x64 }, /* upgrade_key_info */
- { 0x6a4, 0x64 },
{ 0x708, 0x70 }, /* manuf_key_info */
- { 0x778, 0x70 },
{ 0, 0 }
};
__be32 buf[0x350 / 4];
@@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev,
buf[4] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (bp->port.pmf)
- if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
- etest->flags |= ETH_TEST_FL_FAILED;
- }
+
+ if (bnx2x_link_test(bp, is_serdes) != 0) {
+ buf[5] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
#ifdef BNX2X_EXTRA_DEBUG
bnx2x_panic_dump(bp);
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 5a268e9..fa6dbe3 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -241,7 +241,7 @@ static const struct {
/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
* want to handle "system kill" flow at the moment.
*/
- BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+ BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index d584d32..aa03233 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
vn_max_rate = 0;
} else {
+ u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
- /* If min rate is zero - set it to 1 */
+ /* If fairness is enabled (not all min rates are zero) and
+ the current min rate is zero, set it to 1.
+ This is a requirement of the algorithm. */
if (bp->vn_weight_sum && (vn_min_rate == 0))
vn_min_rate = DEF_MIN_RATE;
- vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
+
+ if (IS_MF_SI(bp))
+ /* maxCfg in percents of linkspeed */
+ vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+ else
+ /* maxCfg is absolute in 100Mb units */
+ vn_max_rate = maxCfg * 100;
}
DP(NETIF_MSG_IFUP,
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
m_fair_vn.vn_credit_delta =
max_t(u32, (vn_min_rate * (T_FAIR_COEF /
(8 * bp->vn_weight_sum))),
- (bp->cmng.fair_vars.fair_threshold * 2));
+ (bp->cmng.fair_vars.fair_threshold +
+ MIN_ABOVE_THRESH));
DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
m_fair_vn.vn_credit_delta);
}
@@ -2082,8 +2092,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
bnx2x_calc_vn_weight_sum(bp);
/* calculate and set min-max rate for each vn */
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
- bnx2x_init_vn_minmax(bp, vn);
+ if (bp->port.pmf)
+ for (vn = VN_0; vn < E1HVN_MAX; vn++)
+ bnx2x_init_vn_minmax(bp, vn);
/* always enable rate shaping and fairness */
bp->cmng.flags.cmng_enables |=
@@ -2152,13 +2163,6 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- /* indicate link status only if link status actually changed */
- if (prev_link_status != bp->link_vars.link_status)
- bnx2x_link_report(bp);
-
- if (IS_MF(bp))
- bnx2x_link_sync_notify(bp);
-
if (bp->link_vars.link_up && bp->link_vars.line_speed) {
int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
@@ -2170,6 +2174,13 @@ static void bnx2x_link_attn(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"single function mode without fairness\n");
}
+
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+
+ /* indicate link status only if link status actually changed */
+ if (prev_link_status != bp->link_vars.link_status)
+ bnx2x_link_report(bp);
}
void bnx2x__link_status_update(struct bnx2x *bp)
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index bda60d5..3445ded 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
if (unlikely(bp->panic))
return;
+ bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
/* Protect a state change flow */
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
-
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 1024ae1..a5d5d0b 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port)
}
/**
- * __get_rx_machine_lock - lock the port's RX machine
+ * __get_state_machine_lock - lock the port's state machines
* @port: the port we're looking at
*
*/
-static inline void __get_rx_machine_lock(struct port *port)
+static inline void __get_state_machine_lock(struct port *port)
{
- spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+ spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
}
/**
- * __release_rx_machine_lock - unlock the port's RX machine
+ * __release_state_machine_lock - unlock the port's state machines
* @port: the port we're looking at
*
*/
-static inline void __release_rx_machine_lock(struct port *port)
+static inline void __release_state_machine_lock(struct port *port)
{
- spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+ spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
}
/**
@@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port)
}
/**
- * __initialize_port_locks - initialize a port's RX machine spinlock
+ * __initialize_port_locks - initialize a port's STATE machine spinlock
* @port: the port we're looking at
*
*/
static inline void __initialize_port_locks(struct port *port)
{
// make sure it isn't called twice
- spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock));
+ spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
}
//conversions
@@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
{
rx_states_t last_state;
- // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback)
- __get_rx_machine_lock(port);
-
// keep current State Machine state to compare later if it was changed
last_state = port->sm_rx_state;
@@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
"Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
port->slave->dev->master->name, port->slave->dev->name);
- __release_rx_machine_lock(port);
return;
}
__update_selected(lacpdu, port);
@@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
break;
}
}
- __release_rx_machine_lock(port);
}
/**
@@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
goto re_arm;
}
+ /* Lock around state machines to protect data accessed
+ * by all (e.g., port->sm_vars). ad_rx_machine may run
+ * concurrently due to incoming LACPDU.
+ */
+ __get_state_machine_lock(port);
+
ad_rx_machine(NULL, port);
ad_periodic_machine(port);
ad_port_selection_logic(port);
@@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
// turn off the BEGIN bit, since we already handled it
if (port->sm_vars & AD_PORT_BEGIN)
port->sm_vars &= ~AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
re_arm:
@@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
case AD_TYPE_LACPDU:
pr_debug("Received LACPDU on port %d\n",
port->actor_port_number);
+ /* Protect against concurrent state machines */
+ __get_state_machine_lock(port);
ad_rx_machine(lacpdu, port);
+ __release_state_machine_lock(port);
break;
case AD_TYPE_MARKER:
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 2c46a154..b28baff 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -264,7 +264,8 @@ struct ad_bond_info {
struct ad_slave_info {
struct aggregator aggregator; // 802.3ad aggregator structure
struct port port; // 802.3ad port structure
- spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt
+ spinlock_t state_machine_lock; /* mutex state machines vs.
+ incoming LACPDU */
u16 id;
};
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534a..7513c45 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
goto open_unlock;
}
- priv->wq = create_freezeable_workqueue("mcp251x_wq");
+ priv->wq = create_freezable_workqueue("mcp251x_wq");
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 8ba81b3..5de46a9 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -18,7 +18,7 @@ config CAN_SOFTING
config CAN_SOFTING_CS
tristate "Softing Gmbh CAN pcmcia cards"
depends on PCMCIA
- select CAN_SOFTING
+ depends on CAN_SOFTING
---help---
Support for PCMCIA cards from Softing Gmbh & some cards
from Vector Gmbh.
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 5157e15..aeea9f9 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = {
};
static const struct can_bittiming_const softing_btr_const = {
+ .name = "softing",
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 7ff170c..302be4a 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading other fields */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
barrier();
if (status_idx != *cp->kcq1.status_idx_ptr) {
status_idx = (u16) *cp->kcq1.status_idx_ptr;
+ /* status block index must be read first */
+ rmb();
cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
} else
break;
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
u32 last_status = *info->status_idx_ptr;
int kcqe_cnt;
+ /* status block index must be read before reading the KCQ */
+ rmb();
while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
break;
last_status = *info->status_idx_ptr;
+ /* status block index must be read before reading the KCQ */
+ rmb();
}
return last_status;
}
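The rmb() calls added above enforce that the status block index is read before any of the queue entries it guards. A rough user-space analogue of that ordering requirement, with C11 acquire/release standing in for the kernel's read barrier (a sketch only, not the driver's code):

#include <stdatomic.h>
#include <stdio.h>

static unsigned int ring_data;			/* data guarded by the index */
static _Atomic unsigned int status_idx;		/* producer's completion index */

static void producer(void)
{
	ring_data = 42;					/* write the entry... */
	atomic_store_explicit(&status_idx, 1,
			      memory_order_release);	/* ...then publish the index */
}

static void consumer(void)
{
	unsigned int idx = atomic_load_explicit(&status_idx,
						memory_order_acquire);	/* index first */
	if (idx)
		printf("entry = %u\n", ring_data);	/* then the data it guards */
}

int main(void)
{
	producer();
	consumer();
	return 0;
}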
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
- u32 status_idx;
+ u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
return;
- status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+ while (1) {
+ status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
- CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+ CNIC_WR16(dev, cp->kcq1.io_addr,
+ cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
- status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+ if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
+ cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+ status_idx, IGU_INT_ENABLE, 1);
+ break;
+ }
+
+ new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+ if (new_status_idx != status_idx)
+ continue;
CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
MAX_KCQ_IDX);
cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
status_idx, IGU_INT_ENABLE, 1);
- } else {
- cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
- status_idx, IGU_INT_ENABLE, 1);
+
+ break;
}
}
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index a550d0c..eb71b82 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -123,6 +123,7 @@ enum {
ULP_MODE_NONE = 0,
ULP_MODE_ISCSI = 2,
ULP_MODE_RDMA = 4,
+ ULP_MODE_TCPDDP = 5,
ULP_MODE_FCOE = 6,
};
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae..6aad64d 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
{
int i;
- BUG_ON(adapter->debugfs_root == NULL);
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
/*
* Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
*/
static void cleanup_debugfs(struct adapter *adapter)
{
- BUG_ON(adapter->debugfs_root == NULL);
+ BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
/*
* Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
struct net_device *netdev;
/*
- * Vet our module parameters.
- */
- if (msi != MSI_MSIX && msi != MSI_MSI) {
- dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
- " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
- MSI_MSI);
- err = -EINVAL;
- goto err_out;
- }
-
- /*
* Print our driver banner the first time we're called to initialize a
* device.
*/
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
/*
* Set up our debugfs entries.
*/
- if (cxgb4vf_debugfs_root) {
+ if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
adapter->debugfs_root =
debugfs_create_dir(pci_name(pdev),
cxgb4vf_debugfs_root);
- if (adapter->debugfs_root == NULL)
+ if (IS_ERR_OR_NULL(adapter->debugfs_root))
dev_warn(&pdev->dev, "could not create debugfs"
" directory");
else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
*/
err_free_debugfs:
- if (adapter->debugfs_root) {
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
cleanup_debugfs(adapter);
debugfs_remove_recursive(adapter->debugfs_root);
}
@@ -2802,7 +2791,6 @@ err_release_regions:
err_disable_device:
pci_disable_device(pdev);
-err_out:
return err;
}
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
/*
* Tear down our debugfs entries.
*/
- if (adapter->debugfs_root) {
+ if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
cleanup_debugfs(adapter);
debugfs_remove_recursive(adapter->debugfs_root);
}
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
}
/*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+ struct adapter *adapter;
+ int pidx;
+
+ adapter = pci_get_drvdata(pdev);
+ if (!adapter)
+ return;
+
+ /*
+ * Disable all Virtual Interfaces. This will shut down the
+ * delivery of all ingress packets into the chip for these
+ * Virtual Interfaces.
+ */
+ for_each_port(adapter, pidx) {
+ struct net_device *netdev;
+ struct port_info *pi;
+
+ if (!test_bit(pidx, &adapter->registered_device_map))
+ continue;
+
+ netdev = adapter->port[pidx];
+ if (!netdev)
+ continue;
+
+ pi = netdev_priv(netdev);
+ t4vf_enable_vi(adapter, pi->viid, false, false);
+ }
+
+ /*
+ * Free up all Queues which will prevent further DMA and
+ * Interrupts, allowing various internal pathways to drain.
+ */
+ t4vf_free_sge_resources(adapter);
+}
+
+/*
* PCI Device registration data structures.
*/
#define CH_DEVICE(devid, idx) \
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
.id_table = cxgb4vf_pci_tbl,
.probe = cxgb4vf_pci_probe,
.remove = __devexit_p(cxgb4vf_pci_remove),
+ .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
};
/*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
{
int ret;
+ /*
+ * Vet our module parameters.
+ */
+ if (msi != MSI_MSIX && msi != MSI_MSI) {
+ printk(KERN_WARNING KBUILD_MODNAME
+ ": bad module parameter msi=%d; must be %d"
+ " (MSI-X or MSI) or %d (MSI)\n",
+ msi, MSI_MSIX, MSI_MSI);
+ return -EINVAL;
+ }
+
/* Debugfs support is optional, just warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cxgb4vf_debugfs_root)
+ if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
printk(KERN_WARNING KBUILD_MODNAME ": could not create"
" debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
- if (ret < 0)
+ if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
debugfs_remove(cxgb4vf_debugfs_root);
return ret;
}
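
The cxgb4vf hunks above move the msi parameter check from PCI probe to module
init and switch the debugfs tests to IS_ERR_OR_NULL().  A minimal sketch of
that pattern, assuming a hypothetical "foo" module (foo_mode,
foo_debugfs_root and the other names are placeholders, not cxgb4vf symbols):

    #include <linux/module.h>
    #include <linux/debugfs.h>
    #include <linux/err.h>

    static int foo_mode = 1;
    module_param(foo_mode, int, 0644);

    static struct dentry *foo_debugfs_root;

    static int __init foo_init(void)
    {
    	/* Reject bad parameters before any device is probed. */
    	if (foo_mode < 0 || foo_mode > 1) {
    		pr_warn("foo: bad module parameter foo_mode=%d\n", foo_mode);
    		return -EINVAL;
    	}

    	/*
    	 * debugfs_create_dir() may return an ERR_PTR() when debugfs is
    	 * disabled, so a plain NULL check is not enough.
    	 */
    	foo_debugfs_root = debugfs_create_dir("foo", NULL);
    	if (IS_ERR_OR_NULL(foo_debugfs_root))
    		pr_warn("foo: no debugfs support, continuing\n");

    	return 0;
    }

    static void __exit foo_exit(void)
    {
    	if (!IS_ERR_OR_NULL(foo_debugfs_root))
    		debugfs_remove(foo_debugfs_root);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");

Vetting the parameter at module load also means a bad value fails the insmod
itself rather than every later probe.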
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80..192db22 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < 500; i += ms) {
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx];
if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d1..7018bfe 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
int ret;
/* free and bail if we are shutting down */
- if (unlikely(!netif_running(ndev))) {
+ if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
dev_kfree_skb_any(skb);
return;
}
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 2d4c4fc..461dd6f 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Checksum mode */
dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
- /* GPIO0 on pre-activate PHY */
- iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
- iow(db, DM9000_GPR, 0); /* Enable PHY */
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
unsigned long flags;
/* Save previous register address */
- reg_save = readb(db->io_addr);
spin_lock_irqsave(&db->lock, flags);
+ reg_save = readb(db->io_addr);
netif_stop_queue(dev);
dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
+ /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
+ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
+ mdelay(1); /* delay needed by DM9000B */
+
/* Initialize DM9000 board */
dm9000_reset(db);
dm9000_init_dm9000(dev);
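
The dm9000 hunk above samples the shared index register only after taking the
lock.  A minimal sketch of that locking pattern, with illustrative names (not
the dm9000 driver's own):

    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/io.h>

    struct nic {
    	spinlock_t lock;
    	void __iomem *io_addr;	/* index/address port */
    };

    static void nic_timeout(struct nic *db)
    {
    	unsigned long flags;
    	u8 reg_save;

    	spin_lock_irqsave(&db->lock, flags);
    	reg_save = readb(db->io_addr);	/* save the index under the lock ... */

    	/* ... reset and reinitialise the chip here ... */

    	writeb(reg_save, db->io_addr);	/* ... and restore it under the lock */
    	spin_unlock_irqrestore(&db->lock, flags);
    }

Any register another context can repoint has to be saved and restored while
the same lock that serialises register access is held, otherwise an interrupt
between the read and the lock can leave it pointing at the wrong register.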
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b..8318ea0 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
-
if (mdiobus_register(bp->mii_bus)) {
err = -ENXIO;
goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
bp = netdev_priv(dev);
bp->dev = dev;
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
spin_lock_init(&bp->lock);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 55c1711..33e7c45a 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -42,7 +42,8 @@
#define GBE_CONFIG_RAM_BASE \
((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
-#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE)
+#define GBE_CONFIG_BASE_VIRT \
+ ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
(iowrite16_rep(base + offset, data, count))
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3065870..6d513a3 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
u16 phy_status, phy_1000t_status, phy_ext_status;
u16 pci_status;
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1e_rphy(hw, PHY_STATUS, &phy_status);
e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, downshift_task);
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter)
return 0;
}
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!(adapter->flags2 & FLAG2_DMA_BURST))
+ return;
+
+ /* flush pending descriptor writebacks to memory */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+ /* execute the writes immediately */
+ e1e_flush();
+}
+
void e1000e_down(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter)
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
+
+ e1000e_flush_descriptors(adapter);
+
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter, update_phy_task);
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
e1000_get_phy_info(&adapter->hw);
}
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
static void e1000_update_phy_info(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
schedule_work(&adapter->update_phy_task);
}
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work)
u32 link, tctl;
int tx_pending = 0;
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
/* Cancel scheduled suspend requests. */
@@ -4337,19 +4372,12 @@ link_up:
else
ew32(ICS, E1000_ICS_RXDMT0);
+ /* flush pending descriptors to memory before detecting Tx hang */
+ e1000e_flush_descriptors(adapter);
+
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = 1;
- /* flush partial descriptors to memory before detecting Tx hang */
- if (adapter->flags2 & FLAG2_DMA_BURST) {
- ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
- ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
- /*
- * no need to flush the writes because the timeout code does
- * an er32 first thing
- */
- }
-
/*
* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4887,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
+ /* don't run the task if already down */
+ if (test_bit(__E1000_DOWN, &adapter->state))
+ return;
+
if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
(adapter->flags & FLAG_RX_RESTART_NOW))) {
e1000e_dump(adapter);
@@ -5306,7 +5338,7 @@ void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
__e1000e_disable_aspm(pdev, state);
}
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
static bool e1000e_pm_ready(struct e1000_adapter *adapter)
{
return !!adapter->tx_ring->buffer_info;
@@ -5457,7 +5489,7 @@ static int e1000_runtime_resume(struct device *dev)
return __e1000_resume(pdev);
}
#endif /* CONFIG_PM_RUNTIME */
-#endif /* CONFIG_PM_OPS */
+#endif /* CONFIG_PM */
static void e1000_shutdown(struct pci_dev *pdev)
{
@@ -5935,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* APME bit in EEPROM is mapped to WUC.APME */
eeprom_data = er32(WUC);
eeprom_apme_mask = E1000_WUC_APME;
- if (eeprom_data & E1000_WUC_PHY_WAKE)
+ if ((hw->mac.type > e1000_ich10lan) &&
+ (eeprom_data & E1000_WUC_PHY_WAKE))
adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
@@ -6163,7 +6196,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
@@ -6177,7 +6210,7 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
.driver.pm = &e1000_pm_ops,
#endif
.shutdown = e1000_shutdown,
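
The e1000e hunks above add an early-return guard to every deferred handler
once the adapter is marked down.  A sketch of that guard, with placeholder
names (foo_adapter and __FOO_DOWN are not e1000e symbols):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/bitops.h>

    enum { __FOO_DOWN };

    struct foo_adapter {
    	unsigned long state;
    	struct work_struct reset_task;
    };

    static void foo_reset_task(struct work_struct *work)
    {
    	struct foo_adapter *adapter =
    		container_of(work, struct foo_adapter, reset_task);

    	if (test_bit(__FOO_DOWN, &adapter->state))
    		return;		/* device already going down, nothing to do */

    	/* ... safe to touch the hardware here ... */
    }

The teardown path sets the DOWN bit first, so a work item or timer that was
already queued can no longer race with the hardware being quiesced.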
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index b79d7e1..db0290f 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -1163,15 +1163,11 @@ static int ethoc_resume(struct platform_device *pdev)
# define ethoc_resume NULL
#endif
-#ifdef CONFIG_OF
static struct of_device_id ethoc_match[] = {
- {
- .compatible = "opencores,ethoc",
- },
+ { .compatible = "opencores,ethoc", },
{},
};
MODULE_DEVICE_TABLE(of, ethoc_match);
-#endif
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
@@ -1181,9 +1177,7 @@ static struct platform_driver ethoc_driver = {
.driver = {
.name = "ethoc",
.owner = THIS_MODULE,
-#ifdef CONFIG_OF
.of_match_table = ethoc_match,
-#endif
},
};
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2a71373..cd0282d 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
- }
+ },
+ { }
};
static unsigned char macaddr[ETH_ALEN];
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296..9c0b1ba 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_error;
}
+ netif_carrier_off(dev);
+
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 74486a8..af3822f 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
-void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 8753980..c54a882 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,7 +159,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
- static const unsigned int bufflen = 4096;
+ static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
unsigned int firstoff = 0;
unsigned int lastsize;
unsigned int thisoff = 0;
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
/* only the last buffer may have non-full bufflen */
lastsize = thisoff + thislen;
+ /*
+ * lastsize must not equal bufflen.
+ * If it does, add another buffer with lastsize = 1.
+ */
+ if (lastsize == bufflen) {
+ if (j >= IXGBE_BUFFCNT_MAX) {
+ e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+ "not enough user buffers. We need an extra "
+ "buffer because lastsize is bufflen.\n",
+ xid, i, j, dmacount, (u64)addr);
+ goto out_noddp_free;
+ }
+
+ ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+ j++;
+ lastsize = 1;
+ }
+
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
e_err(drv, "failed to allocated FCoE DDP pool\n");
spin_lock_init(&fcoe->lock);
+
+ /* Extra buffer to be shared by all DDPs for the HW workaround */
+ fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ if (fcoe->extra_ddp_buffer == NULL) {
+ e_err(drv, "failed to allocated extra DDP buffer\n");
+ goto out_extra_ddp_buffer_alloc;
+ }
+
+ fcoe->extra_ddp_buffer_dma =
+ dma_map_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma)) {
+ e_err(drv, "failed to map extra DDP buffer\n");
+ goto out_extra_ddp_buffer_dma;
+ }
}
/* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
}
}
#endif
+
+ return;
+
+out_extra_ddp_buffer_dma:
+ kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+ pci_pool_destroy(fcoe->pool);
+ fcoe->pool = NULL;
}
/**
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
if (fcoe->pool) {
for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
+ dma_unmap_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ kfree(fcoe->extra_ddp_buffer);
pci_pool_destroy(fcoe->pool);
fcoe->pool = NULL;
}
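
The ixgbe_fcoe.c hunks above allocate one extra DDP buffer, DMA-map it, and
unwind on failure.  A minimal sketch of that allocate/map/unwind sequence
(dev, BUF_LEN and struct ddp_ctx are placeholders, not ixgbe symbols):

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    #define BUF_LEN 4096

    struct ddp_ctx {
    	unsigned char *extra_buf;
    	dma_addr_t extra_buf_dma;
    };

    static int ddp_setup(struct device *dev, struct ddp_ctx *ctx)
    {
    	ctx->extra_buf = kmalloc(BUF_LEN, GFP_KERNEL);
    	if (!ctx->extra_buf)
    		return -ENOMEM;

    	ctx->extra_buf_dma = dma_map_single(dev, ctx->extra_buf,
    					    BUF_LEN, DMA_FROM_DEVICE);
    	if (dma_mapping_error(dev, ctx->extra_buf_dma)) {
    		kfree(ctx->extra_buf);	/* undo the allocation on failure */
    		return -ENOMEM;
    	}
    	return 0;
    }

    static void ddp_teardown(struct device *dev, struct ddp_ctx *ctx)
    {
    	dma_unmap_single(dev, ctx->extra_buf_dma, BUF_LEN, DMA_FROM_DEVICE);
    	kfree(ctx->extra_buf);
    }

Every dma_map_single() needs a matching dma_mapping_error() check, and the
error path undoes only the steps that already succeeded, in reverse order,
which is what the new out_extra_ddp_buffer_* labels do in the driver.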
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c55..65cc8fb 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@ struct ixgbe_fcoe {
spinlock_t lock;
struct pci_pool *pool;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+ unsigned char *extra_ddp_buffer;
+ dma_addr_t extra_ddp_buffer_dma;
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index fbae703..30f9ccf 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3728,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
* We need to try and force an autonegotiation
* session, then bring up link.
*/
- hw->mac.ops.setup_sfp(hw);
+ if (hw->mac.ops.setup_sfp)
+ hw->mac.ops.setup_sfp(hw);
if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
schedule_work(&adapter->multispeed_fiber_task);
} else {
@@ -5968,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
unregister_netdev(adapter->netdev);
return;
}
- hw->mac.ops.setup_sfp(hw);
+ if (hw->mac.ops.setup_sfp)
+ hw->mac.ops.setup_sfp(hw);
if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
/* This will also work for DA Twinax connections */
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e..79ccb54 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
for (i = 0; i < PHY_MAX_ADDR; i++)
bp->mii_bus->irq[i] = PHY_POLL;
- platform_set_drvdata(bp->dev, bp->mii_bus);
+ dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
if (mdiobus_register(bp->mii_bus))
goto err_out_free_mdio_irq;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621..fc27a99 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -528,8 +528,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
vnet_hdr_len = q->vnet_hdr_sz;
err = -EINVAL;
- if ((len -= vnet_hdr_len) < 0)
+ if (len < vnet_hdr_len)
goto err;
+ len -= vnet_hdr_len;
err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
sizeof(vnet_hdr));
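
The macvtap hunk above replaces a sign test on an unsigned length with a
compare-before-subtract.  A short sketch of the corrected check (the helper
name is illustrative):

    #include <linux/types.h>
    #include <linux/errno.h>

    static int consume_header(size_t *len, size_t hdr_len)
    {
    	/*
    	 * 'len' is unsigned, so "(len -= hdr_len) < 0" can never be true;
    	 * the subtraction would just wrap.  Compare first, subtract after.
    	 */
    	if (*len < hdr_len)
    		return -EINVAL;
    	*len -= hdr_len;
    	return 0;
    }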
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a9..e1e33c8 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
struct pch_gbe_regs_mac_adr mac_adr[16];
u32 ADDR_MASK;
u32 MIIM;
- u32 reserve2;
+ u32 MAC_ADDR_LOAD;
u32 RGMII_ST;
u32 RGMII_CTRL;
u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 4c9a7d4..8c66e22 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_SHORT_PKT 64
#define DSC_INIT16 0xC000
#define PCH_GBE_DMA_ALIGN 0
+#define PCH_GBE_DMA_PADDING 2
#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+ iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
/**
* pch_gbe_mac_read_mac_addr - Read MAC address
* @hw: Pointer to the HW structure
@@ -1365,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
struct pch_gbe_buffer *buffer_info;
struct pch_gbe_rx_desc *rx_desc;
u32 length;
- unsigned char tmp_packet[ETH_HLEN];
unsigned int i;
unsigned int cleaned_count = 0;
bool cleaned = false;
- struct sk_buff *skb;
+ struct sk_buff *skb, *new_skb;
u8 dma_status;
u16 gbec_status;
u32 tcp_ip_status;
- u8 skb_copy_flag = 0;
- u8 skb_padding_flag = 0;
i = rx_ring->next_to_clean;
@@ -1418,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
pr_err("Receive CRC Error\n");
} else {
/* get receive length */
- /* length convert[-3], padding[-2] */
- length = (rx_desc->rx_words_eob) - 3 - 2;
+ /* length convert[-3] */
+ length = (rx_desc->rx_words_eob) - 3;
/* Decide the data conversion method */
if (!adapter->rx_csum) {
/* [Header:14][payload] */
- skb_padding_flag = 0;
- skb_copy_flag = 1;
+ if (NET_IP_ALIGN) {
+ /* Because alignment differs,
+ * the new_skb is newly allocated,
+ * and data is copied to new_skb.*/
+ new_skb = netdev_alloc_skb(netdev,
+ length + NET_IP_ALIGN);
+ if (!new_skb) {
+ /* dorrop error */
+ pr_err("New skb allocation "
+ "Error\n");
+ goto dorrop;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(new_skb->data, skb->data,
+ length);
+ skb = new_skb;
+ } else {
+ /* DMA buffer is used as SKB as it is.*/
+ buffer_info->skb = NULL;
+ }
} else {
/* [Header:14][padding:2][payload] */
- skb_padding_flag = 1;
- if (length < copybreak)
- skb_copy_flag = 1;
- else
- skb_copy_flag = 0;
- }
-
- /* Data conversion */
- if (skb_copy_flag) { /* recycle skb */
- struct sk_buff *new_skb;
- new_skb =
- netdev_alloc_skb(netdev,
- length + NET_IP_ALIGN);
- if (new_skb) {
- if (!skb_padding_flag) {
- skb_reserve(new_skb,
- NET_IP_ALIGN);
+ /* The length includes padding length */
+ length = length - PCH_GBE_DMA_PADDING;
+ if ((length < copybreak) ||
+ (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+ /* Because alignment differs,
+ * the new_skb is newly allocated,
+ * and data is copied to new_skb.
+ * Padding data is deleted
+ * at the time of a copy.*/
+ new_skb = netdev_alloc_skb(netdev,
+ length + NET_IP_ALIGN);
+ if (!new_skb) {
+ /* dorrop error */
+ pr_err("New skb allocation "
+ "Error\n");
+ goto dorrop;
}
+ skb_reserve(new_skb, NET_IP_ALIGN);
memcpy(new_skb->data, skb->data,
- length);
- /* save the skb
- * in buffer_info as good */
+ ETH_HLEN);
+ memcpy(&new_skb->data[ETH_HLEN],
+ &skb->data[ETH_HLEN +
+ PCH_GBE_DMA_PADDING],
+ length - ETH_HLEN);
skb = new_skb;
- } else if (!skb_padding_flag) {
- /* dorrop error */
- pr_err("New skb allocation Error\n");
- goto dorrop;
+ } else {
+ /* Padding data is deleted
+ * by moving header data.*/
+ memmove(&skb->data[PCH_GBE_DMA_PADDING],
+ &skb->data[0], ETH_HLEN);
+ skb_reserve(skb, NET_IP_ALIGN);
+ buffer_info->skb = NULL;
}
- } else {
- buffer_info->skb = NULL;
}
- if (skb_padding_flag) {
- memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
- memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
- ETH_HLEN);
- skb_reserve(skb, NET_IP_ALIGN);
-
- }
-
+ /* The length includes FCS length */
+ length = length - ETH_FCS_LEN;
/* update status of driver */
adapter->stats.rx_bytes += length;
adapter->stats.rx_packets++;
@@ -2318,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
pch_gbe_set_ethtool_ops(netdev);
+ pch_gbe_mac_load_mac_addr(&adapter->hw);
pch_gbe_mac_reset_hw(&adapter->hw);
/* setup the private structure */
@@ -2426,7 +2446,7 @@ static struct pci_driver pch_gbe_pcidev = {
.id_table = pch_gbe_pcidev_id,
.probe = pch_gbe_probe,
.remove = pch_gbe_remove,
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
.driver.pm = &pch_gbe_pm_ops,
#endif
.shutdown = pch_gbe_shutdown,
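
The pch_gbe receive rework above copies frames into a freshly allocated skb
whenever the DMA alignment does not match NET_IP_ALIGN.  A sketch of that
realignment copy, with illustrative names (not pch_gbe's own helpers):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    static struct sk_buff *realign_rx(struct net_device *netdev,
    				  const void *data, unsigned int len)
    {
    	struct sk_buff *skb;

    	skb = netdev_alloc_skb(netdev, len + NET_IP_ALIGN);
    	if (!skb)
    		return NULL;

    	skb_reserve(skb, NET_IP_ALIGN);	/* land the IP header on a 4-byte boundary */
    	memcpy(skb_put(skb, len), data, len);
    	return skb;
    }

When the hardware padding already matches NET_IP_ALIGN and the frame is large,
the driver instead hands the DMA buffer up as the skb and only shifts the
Ethernet header, avoiding the copy.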
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda..530ab5a 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
PCMCIA_DEVICE_NULL,
};
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 27e6f6d..e3ebd90 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.26"
-#define DRV_RELDATE "30May2010"
+#define DRV_VERSION "0.27"
+#define DRV_RELDATE "23Feb2011"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@@ -69,6 +69,8 @@
/* MAC registers */
#define MCR0 0x00 /* Control register 0 */
+#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
+#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
#define MCR1 0x04 /* Control register 1 */
#define MAC_RST 0x0001 /* Reset the MAC */
#define MBCR 0x08 /* Bus control */
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 *adrp;
- u16 reg;
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
+ u16 *adrp;
+ u16 hash_table[4] = { 0 };
+
+ spin_lock_irqsave(&lp->lock, flags);
- /* MAC Address */
+ /* Keep our MAC Address */
adrp = (u16 *)dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
- /* Promiscous Mode */
- spin_lock_irqsave(&lp->lock, flags);
-
/* Clear AMCP & PROM bits */
- reg = ioread16(ioaddr) & ~0x0120;
- if (dev->flags & IFF_PROMISC) {
- reg |= 0x0020;
- lp->mcr0 |= 0x0020;
- }
- /* Too many multicast addresses
- * accept all traffic */
- else if ((netdev_mc_count(dev) > MCAST_MAX) ||
- (dev->flags & IFF_ALLMULTI))
- reg |= 0x0020;
+ lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
- iowrite16(reg, ioaddr);
- spin_unlock_irqrestore(&lp->lock, flags);
+ /* Promiscuous mode */
+ if (dev->flags & IFF_PROMISC)
+ lp->mcr0 |= MCR0_PROMISC;
- /* Build the hash table */
- if (netdev_mc_count(dev) > MCAST_MAX) {
- u16 hash_table[4];
- u32 crc;
+ /* Enable multicast hash table function to
+ * receive all multicast packets. */
+ else if (dev->flags & IFF_ALLMULTI) {
+ lp->mcr0 |= MCR0_HASH_EN;
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
+ for (i = 0; i < MCAST_MAX ; i++) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ }
+ for (i = 0; i < 4; i++)
+ hash_table[i] = 0xffff;
+ }
+ /* Use internal multicast address registers if the number of
+ * multicast addresses is not greater than MCAST_MAX. */
+ else if (netdev_mc_count(dev) <= MCAST_MAX) {
+ i = 0;
netdev_for_each_mc_addr(ha, dev) {
- char *addrs = ha->addr;
+ u16 *adrp = (u16 *) ha->addr;
+ iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+ iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+ iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+ i++;
+ }
+ while (i < MCAST_MAX) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ i++;
+ }
+ }
+ /* Otherwise, Enable multicast hash table function. */
+ else {
+ u32 crc;
- if (!(*addrs & 1))
- continue;
+ lp->mcr0 |= MCR0_HASH_EN;
+
+ for (i = 0; i < MCAST_MAX ; i++) {
+ iowrite16(0, ioaddr + MID_1L + 8 * i);
+ iowrite16(0, ioaddr + MID_1M + 8 * i);
+ iowrite16(0, ioaddr + MID_1H + 8 * i);
+ }
- crc = ether_crc_le(6, addrs);
+ /* Build multicast hash table */
+ netdev_for_each_mc_addr(ha, dev) {
+ u8 *addrs = ha->addr;
+
+ crc = ether_crc(ETH_ALEN, addrs);
crc >>= 26;
- hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
- /* Fill the MAC hash tables with their values */
+ }
+
+ iowrite16(lp->mcr0, ioaddr + MCR0);
+
+ /* Fill the MAC hash tables with their values */
+ if (lp->mcr0 && MCR0_HASH_EN) {
iowrite16(hash_table[0], ioaddr + MAR0);
iowrite16(hash_table[1], ioaddr + MAR1);
iowrite16(hash_table[2], ioaddr + MAR2);
iowrite16(hash_table[3], ioaddr + MAR3);
}
- /* Multicast Address 1~4 case */
- i = 0;
- netdev_for_each_mc_addr(ha, dev) {
- if (i >= MCAST_MAX)
- break;
- adrp = (u16 *) ha->addr;
- iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
- iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
- iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
- i++;
- }
- while (i < MCAST_MAX) {
- iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
- iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
- iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
- i++;
- }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
}
static void netdev_get_drvinfo(struct net_device *dev,
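
The r6040 multicast rework above builds a 64-bit hash filter from the CRC of
each multicast address.  A sketch of that hash computation, mirroring the
variant in the hunk (crc >> 26, high bits pick the register, low bits pick
the bit); the helper name is illustrative:

    #include <linux/types.h>
    #include <linux/crc32.h>
    #include <linux/if_ether.h>

    static void hash_mc_addr(u8 *addr, u16 hash_table[4])
    {
    	u32 crc = ether_crc(ETH_ALEN, addr);

    	crc >>= 26;					/* keep the top 6 bits */
    	hash_table[crc >> 4] |= 1 << (crc & 0xf);	/* set one of 64 filter bits */
    }

The four 16-bit words are then written to MAR0..MAR3; the exact bit ordering
is chip-specific, which is why the patch switches from ether_crc_le() to
ether_crc() and drops the "15 -" inversion.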
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 59ccf0c..7ffdb80 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -25,6 +25,7 @@
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
+#include <linux/pci-aspm.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -617,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
}
}
-static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
+static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
+ void __iomem *ioaddr = tp->mmio_addr;
int i;
RTL_W8(ERIDR, cmd);
@@ -630,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
break;
}
- ocp_write(ioaddr, 0x1, 0x30, 0x00000001);
+ ocp_write(tp, 0x1, 0x30, 0x00000001);
}
#define OOB_CMD_RESET 0x00
@@ -2868,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
return;
+ }
if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
(tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2891,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
break;
}
@@ -2900,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (tp->mac_version == RTL_GIGA_MAC_VER_27)
+ if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
+ (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
+ (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
return;
+ }
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
break;
}
@@ -3009,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
mii->reg_num_mask = 0x1f;
mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+ /* disable ASPM completely as it causes random device stop-working
+ * problems as well as full system hangs for some PCIe device users */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
@@ -3042,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = PCIMulRW | RxChkSum;
+ tp->cp_cmd = RxChkSum;
if ((sizeof(dma_addr_t) > 4) &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3190,6 +3207,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
+ netif_carrier_off(dev);
+
out:
return rc;
@@ -3316,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
/* Disable interrupts */
rtl8169_irq_mask_and_ack(ioaddr);
- if (tp->mac_version == RTL_GIGA_MAC_VER_28) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28) {
while (RTL_R8(TxPoll) & NPQ)
udelay(20);
@@ -3845,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
Cxpl_dbg_sel | \
ASF | \
PktCntrDisable | \
- PCIDAC | \
- PCIMulRW)
+ Mac_dbgo_sel)
static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{
@@ -3876,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(Config1, cfg1 & ~LEDS0);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
-
rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
@@ -3889,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
-
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3916,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
}
}
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_07:
rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3930,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev)
break;
}
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
RTL_W8(MaxTxPacketSize, TxPacketMax);
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
-
+ tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_W16(IntrMitigate, 0x0000);
@@ -3947,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_tx_config_registers(tp);
- RTL_W8(Cfg9346, Cfg9346_Lock);
-
RTL_R8(IntrMask);
rtl_set_rx_mode(dev);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
-
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
RTL_W16(IntrMask, tp->intr_event);
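
The r8169 hunk above opts the device out of PCIe link power management before
it is enabled.  A minimal sketch of that opt-out (foo_probe is a placeholder,
not an r8169 function):

    #include <linux/pci.h>
    #include <linux/pci-aspm.h>

    static int foo_probe(struct pci_dev *pdev)
    {
    	/* Turn off all link power management before touching the device. */
    	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
    				     PCIE_LINK_STATE_L1 |
    				     PCIE_LINK_STATE_CLKPM);

    	return pci_enable_device(pdev);
    }

With L0s, L1 and clock PM disabled the link never enters a low-power state
while the driver owns the device, which sidesteps the hangs the comment in
the hunk describes.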
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19..ca886d9 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_self_tests efx_tests;
+ struct efx_self_tests *efx_tests;
int already_up;
- int rc;
+ int rc = -ENOMEM;
+
+ efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+ if (!efx_tests)
+ goto fail;
+
ASSERT_RTNL();
if (efx->state != STATE_RUNNING) {
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
- goto fail2;
+ goto fail1;
}
}
- memset(&efx_tests, 0, sizeof(efx_tests));
-
- rc = efx_selftest(efx, &efx_tests, test->flags);
+ rc = efx_selftest(efx, efx_tests, test->flags);
if (!already_up)
dev_close(efx->net_dev);
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
- fail2:
- fail1:
+fail1:
/* Fill ethtool results structures */
- efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data);
+ efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+ kfree(efx_tests);
+fail:
if (rc)
test->flags |= ETH_TEST_FL_FAILED;
}
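
The sfc ethtool hunk above moves a large results structure off the kernel
stack.  A sketch of the same stack-to-heap move (struct big_results and the
helper are placeholders, not sfc symbols):

    #include <linux/types.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct big_results {
    	u64 counters[256];	/* far too big to live on the stack */
    };

    static int run_self_test(struct big_results **out)
    {
    	struct big_results *res;

    	res = kzalloc(sizeof(*res), GFP_KERNEL);
    	if (!res)
    		return -ENOMEM;

    	/* ... run the tests, fill *res ... */

    	*out = res;		/* caller reports the results and kfree()s them */
    	return 0;
    }

Zero-allocating up front also lets every exit path still report whatever
partial results exist, which is why the fail labels in the hunk keep calling
efx_ethtool_fill_self_tests() before freeing.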
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 42daf98..35b28f4 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- /* device is off until link detection */
- netif_carrier_off(dev);
-
return dev;
}
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 64bfdae..d70bde95 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev)
smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);
+ /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */
+ spin_lock_irq(&pdata->mac_lock);
+ smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q);
+ spin_unlock_irq(&pdata->mac_lock);
+
/* Make sure EEPROM has finished loading before setting GPIO_CFG */
timeout = 50;
while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3..0e5f031 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
priv->hw = device;
- if (device_can_wakeup(priv->device))
+ if (device_can_wakeup(priv->device)) {
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+ enable_irq_wake(dev->irq);
+ }
return 0;
}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 93b32d3..06c0e503 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+ if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+ ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !netif_running(dev)))
return -EAGAIN;
spin_lock_bh(&tp->lock);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e..5002f5b 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long)&dm9601_info,
},
{
+ USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
+ {
USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
.driver_info = (unsigned long)&dm9601_info,
},
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fce..6d83812 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@ exit:
static void hso_free_tiomget(struct hso_serial *serial)
{
- struct hso_tiocmget *tiocmget = serial->tiocmget;
+ struct hso_tiocmget *tiocmget;
+ if (!serial)
+ return;
+ tiocmget = serial->tiocmget;
if (tiocmget) {
- if (tiocmget->urb) {
- usb_free_urb(tiocmget->urb);
- tiocmget->urb = NULL;
- }
+ usb_free_urb(tiocmget->urb);
+ tiocmget->urb = NULL;
serial->tiocmget = NULL;
kfree(tiocmget);
-
}
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a416..95c41d5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@ fail_halt:
if (urb != NULL) {
clear_bit (EVENT_RX_MEMORY, &dev->flags);
status = usb_autopm_get_interface(dev->intf);
- if (status < 0)
+ if (status < 0) {
+ usb_free_urb(urb);
goto fail_lowmem;
+ }
if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
resched = 0;
usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 78c26fd..62ce2f4 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
+/*
+ * Wait for synth to settle
+ */
+static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
+{
+ /*
+ * On 5211+ read activation -> rx delay
+ * and use it (100ns steps).
+ */
+ if (ah->ah_version != AR5K_AR5210) {
+ u32 delay;
+ delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
+ AR5K_PHY_RX_DELAY_M;
+ delay = (channel->hw_value & CHANNEL_CCK) ?
+ ((delay << 2) / 22) : (delay / 10);
+ if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
+ delay = delay << 1;
+ if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
+ delay = delay << 2;
+ /* XXX: /2 on turbo ? Let's be safe
+ * for now */
+ udelay(100 + delay);
+ } else {
+ mdelay(1);
+ }
+}
+
/**********************\
* RF Gain optimization *
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
case AR5K_RF5111:
ret = ath5k_hw_rf5111_channel(ah, channel);
break;
+ case AR5K_RF2317:
case AR5K_RF2425:
ret = ath5k_hw_rf2425_channel(ah, channel);
break;
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* Failed */
if (i >= 100)
return -EIO;
+
+ /* Set channel and wait for synth */
+ ret = ath5k_hw_channel(ah, channel);
+ if (ret)
+ return ret;
+
+ ath5k_hw_wait_for_synth(ah, channel);
}
/*
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
+ /* Write OFDM timings on 5212*/
+ if (ah->ah_version == AR5K_AR5212 &&
+ channel->hw_value & CHANNEL_OFDM) {
+
+ ret = ath5k_hw_write_ofdm_timings(ah, channel);
+ if (ret)
+ return ret;
+
+ /* Spur info is available only from EEPROM versions
+ * greater than 5.3, but the EEPROM routines will use
+ * static values for older versions */
+ if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
+ ath5k_hw_set_spur_mitigation_filter(ah,
+ channel);
+ }
+
+ /* If we used fast channel switching
+ * we are done, release RF bus and
+ * fire up NF calibration.
+ *
+ * Note: Only NF calibration due to
+ * channel change, not AGC calibration
+ * since AGC is still running !
+ */
+ if (fast) {
+ /*
+ * Release RF Bus grant
+ */
+ AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
+ AR5K_PHY_RFBUS_REQ_REQUEST);
+
+ /*
+ * Start NF calibration
+ */
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_NF);
+
+ return ret;
+ }
+
/*
* For 5210 we do all initialization using
* initvals, so we don't have to modify
* any settings (5210 also only supports
* a/aturbo modes)
*/
- if ((ah->ah_version != AR5K_AR5210) && !fast) {
+ if (ah->ah_version != AR5K_AR5210) {
/*
* Write initial RF gain settings
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
- /* Write OFDM timings on 5212*/
- if (ah->ah_version == AR5K_AR5212 &&
- channel->hw_value & CHANNEL_OFDM) {
-
- ret = ath5k_hw_write_ofdm_timings(ah, channel);
- if (ret)
- return ret;
-
- /* Spur info is available only from EEPROM versions
- * greater than 5.3, but the EEPROM routines will use
- * static values for older versions */
- if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
- ath5k_hw_set_spur_mitigation_filter(ah,
- channel);
- }
-
/*Enable/disable 802.11b mode on 5111
(enable 2111 frequency converter + CCK)*/
if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
+ ath5k_hw_wait_for_synth(ah, channel);
+
/*
- * On 5211+ read activation -> rx delay
- * and use it.
+ * Perform ADC test to see if baseband is ready
+ * Set tx hold and check adc test register
*/
- if (ah->ah_version != AR5K_AR5210) {
- u32 delay;
- delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
- AR5K_PHY_RX_DELAY_M;
- delay = (channel->hw_value & CHANNEL_CCK) ?
- ((delay << 2) / 22) : (delay / 10);
- if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
- delay = delay << 1;
- if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
- delay = delay << 2;
- /* XXX: /2 on turbo ? Let's be safe
- * for now */
- udelay(100 + delay);
- } else {
- mdelay(1);
- }
-
- if (fast)
- /*
- * Release RF Bus grant
- */
- AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
- AR5K_PHY_RFBUS_REQ_REQUEST);
- else {
- /*
- * Perform ADC test to see if baseband is ready
- * Set tx hold and check adc test register
- */
- phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
- ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
- for (i = 0; i <= 20; i++) {
- if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
- break;
- udelay(200);
- }
- ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
+ phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
+ ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
+ for (i = 0; i <= 20; i++) {
+ if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
+ break;
+ udelay(200);
}
+ ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
/*
* Start automatic gain control calibration
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 23838e3..1a7fa6e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,7 +21,6 @@
#include <linux/device.h>
#include <linux/leds.h>
#include <linux/completion.h>
-#include <linux/pm_qos_params.h>
#include "debug.h"
#include "common.h"
@@ -57,8 +56,6 @@ struct ath_node;
#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
-#define ATH9K_PM_QOS_DEFAULT_VALUE 55
-
#define TSF_TO_TU(_h,_l) \
((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
@@ -633,8 +630,6 @@ struct ath_softc {
struct ath_descdma txsdma;
struct ath_ant_comb ant_comb;
-
- struct pm_qos_request_list pm_qos_req;
};
struct ath_wiphy {
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
extern int led_blink;
-extern int ath9k_pm_qos_value;
extern bool is_ath9k_unloaded;
irqreturn_t ath_isr(int irq, void *dev);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5ab3084..07b1633 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
struct tx_buf *tx_buf = NULL;
struct sk_buff *nskb = NULL;
int ret = 0, i;
- u16 *hdr, tx_skb_cnt = 0;
+ u16 tx_skb_cnt = 0;
u8 *buf;
+ __le16 *hdr;
if (hif_dev->tx.tx_skb_cnt == 0)
return 0;
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
buf = tx_buf->buf;
buf += tx_buf->offset;
- hdr = (u16 *)buf;
- *hdr++ = nskb->len;
- *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
+ hdr = (__le16 *)buf;
+ *hdr++ = cpu_to_le16(nskb->len);
+ *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
buf += 4;
memcpy(buf, nskb->data, nskb->len);
tx_buf->len = nskb->len + 4;
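
The hif_usb hunk above stores the stream-mode header through a __le16 pointer
with cpu_to_le16().  A short sketch of that endianness-safe store (the helper
and parameters are illustrative):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static void write_stream_header(u8 *buf, u16 len, u16 tag)
    {
    	__le16 *hdr = (__le16 *)buf;

    	hdr[0] = cpu_to_le16(len);	/* payload length, little-endian on the wire */
    	hdr[1] = cpu_to_le16(tag);	/* stream-mode tag, little-endian on the wire */
    }

Writing through a plain u16 pointer only happens to work on little-endian
hosts; the __le16 annotation also lets sparse flag any unconverted store.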
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 087a6a9..a033d01 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
-int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
-module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
-MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
-
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -762,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
- pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
-
return 0;
error_world:
@@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc)
}
ieee80211_unregister_hw(hw);
- pm_qos_remove_request(&sc->pm_qos_req);
ath_rx_cleanup(sc);
ath_tx_cleanup(sc);
ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 180170d..2915b11 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
struct ath_common *common = ath9k_hw_common(ah);
if (!(ints & ATH9K_INT_GLOBAL))
- ath9k_hw_enable_interrupts(ah);
+ ath9k_hw_disable_interrupts(ah);
ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
}
- ath9k_hw_enable_interrupts(ah);
+ if (ints & ATH9K_INT_GLOBAL)
+ ath9k_hw_enable_interrupts(ah);
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index da5c645..a09d15f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1173,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
ath9k_btcoex_timer_resume(sc);
}
- /* User has the option to provide pm-qos value as a module
- * parameter rather than using the default value of
- * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
- */
- pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
-
if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
common->bus_ops->extn_synch_en(common);
@@ -1345,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
sc->sc_flags |= SC_OP_INVALID;
- pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
-
mutex_unlock(&sc->mutex);
ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 537732e..f82c400 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x057c, 0x8402) },
/* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
{ USB_DEVICE(0x1668, 0x1200) },
+ /* Airlive X.USB a/b/g/n */
+ { USB_DEVICE(0x1b75, 0x9170) },
/* terminate */
{}
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 61915f3..471a52a 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -706,11 +706,10 @@ static void schedule_reset(struct ipw2100_priv *priv)
netif_stop_queue(priv->net_dev);
priv->status |= STATUS_RESET_PENDING;
if (priv->reset_backoff)
- queue_delayed_work(priv->workqueue, &priv->reset_work,
- priv->reset_backoff * HZ);
+ schedule_delayed_work(&priv->reset_work,
+ priv->reset_backoff * HZ);
else
- queue_delayed_work(priv->workqueue, &priv->reset_work,
- 0);
+ schedule_delayed_work(&priv->reset_work, 0);
if (priv->reset_backoff < MAX_RESET_BACKOFF)
priv->reset_backoff++;
@@ -1474,7 +1473,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
if (priv->stop_hang_check) {
priv->stop_hang_check = 0;
- queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+ schedule_delayed_work(&priv->hang_check, HZ / 2);
}
fail_up:
@@ -1808,8 +1807,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
if (priv->stop_rf_kill) {
priv->stop_rf_kill = 0;
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
}
deferred = 1;
@@ -2086,7 +2085,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
priv->status |= STATUS_ASSOCIATING;
priv->connect_start = get_seconds();
- queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10);
+ schedule_delayed_work(&priv->wx_event_work, HZ / 10);
}
static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
@@ -2166,9 +2165,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
return;
if (priv->status & STATUS_SECURITY_UPDATED)
- queue_delayed_work(priv->workqueue, &priv->security_work, 0);
+ schedule_delayed_work(&priv->security_work, 0);
- queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
+ schedule_delayed_work(&priv->wx_event_work, 0);
}
static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -2183,8 +2182,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
/* Make sure the RF Kill check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
}
static void send_scan_event(void *data)
@@ -2219,13 +2217,12 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event_later))
- queue_delayed_work(priv->workqueue,
- &priv->scan_event_later,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event_later,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
priv->user_requested_scan = 0;
cancel_delayed_work(&priv->scan_event_later);
- queue_work(priv->workqueue, &priv->scan_event_now);
+ schedule_work(&priv->scan_event_now);
}
}
@@ -4329,8 +4326,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
/* Make sure the RF_KILL check timer is running */
priv->stop_rf_kill = 0;
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
} else
schedule_reset(priv);
}
@@ -4461,20 +4458,17 @@ static void bd_queue_initialize(struct ipw2100_priv *priv,
IPW_DEBUG_INFO("exit\n");
}
-static void ipw2100_kill_workqueue(struct ipw2100_priv *priv)
+static void ipw2100_kill_works(struct ipw2100_priv *priv)
{
- if (priv->workqueue) {
- priv->stop_rf_kill = 1;
- priv->stop_hang_check = 1;
- cancel_delayed_work(&priv->reset_work);
- cancel_delayed_work(&priv->security_work);
- cancel_delayed_work(&priv->wx_event_work);
- cancel_delayed_work(&priv->hang_check);
- cancel_delayed_work(&priv->rf_kill);
- cancel_delayed_work(&priv->scan_event_later);
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- }
+ priv->stop_rf_kill = 1;
+ priv->stop_hang_check = 1;
+ cancel_delayed_work_sync(&priv->reset_work);
+ cancel_delayed_work_sync(&priv->security_work);
+ cancel_delayed_work_sync(&priv->wx_event_work);
+ cancel_delayed_work_sync(&priv->hang_check);
+ cancel_delayed_work_sync(&priv->rf_kill);
+ cancel_work_sync(&priv->scan_event_now);
+ cancel_delayed_work_sync(&priv->scan_event_later);
}
static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -6046,7 +6040,7 @@ static void ipw2100_hang_check(struct work_struct *work)
priv->last_rtc = rtc;
if (!priv->stop_hang_check)
- queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+ schedule_delayed_work(&priv->hang_check, HZ / 2);
spin_unlock_irqrestore(&priv->low_lock, flags);
}
@@ -6062,8 +6056,8 @@ static void ipw2100_rf_kill(struct work_struct *work)
if (rf_kill_active(priv)) {
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
if (!priv->stop_rf_kill)
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(HZ));
goto exit_unlock;
}
@@ -6209,8 +6203,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_LIST_HEAD(&priv->fw_pend_list);
INIT_STAT(&priv->fw_pend_stat);
- priv->workqueue = create_workqueue(DRV_NAME);
-
INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
@@ -6410,7 +6402,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
if (dev->irq)
free_irq(dev->irq, priv);
- ipw2100_kill_workqueue(priv);
+ ipw2100_kill_works(priv);
/* These are safe to call even if they weren't allocated */
ipw2100_queues_free(priv);
@@ -6460,9 +6452,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
* first, then close() will crash. */
unregister_netdev(dev);
- /* ipw2100_down will ensure that there is no more pending work
- * in the workqueue's, so we can safely remove them now. */
- ipw2100_kill_workqueue(priv);
+ ipw2100_kill_works(priv);
ipw2100_queues_free(priv);
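
The ipw2100 hunks above drop the driver's private workqueue in favour of the
shared system workqueue and the _sync cancel variants.  A minimal sketch of
that conversion (struct foo_priv is a placeholder, not an ipw2100 type):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct foo_priv {
    	struct delayed_work hang_check;
    };

    static void foo_start(struct foo_priv *priv)
    {
    	/* Queue on the shared system workqueue; no private queue needed. */
    	schedule_delayed_work(&priv->hang_check, HZ / 2);
    }

    static void foo_stop(struct foo_priv *priv)
    {
    	/* Waits for a running handler, so the structure can be freed after this. */
    	cancel_delayed_work_sync(&priv->hang_check);
    }

Using cancel_delayed_work_sync() in teardown is what makes the private
workqueue (and its destroy_workqueue()) unnecessary: no handler can still be
running once it returns.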
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 838002b..99cba96 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -580,7 +580,6 @@ struct ipw2100_priv {
struct tasklet_struct irq_tasklet;
- struct workqueue_struct *workqueue;
struct delayed_work reset_work;
struct delayed_work security_work;
struct delayed_work wx_event_work;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index ae438ed..160881f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -894,9 +894,8 @@ static void ipw_led_link_on(struct ipw_priv *priv)
/* If we aren't associated, schedule turning the LED off */
if (!(priv->status & STATUS_ASSOCIATED))
- queue_delayed_work(priv->workqueue,
- &priv->led_link_off,
- LD_TIME_LINK_ON);
+ schedule_delayed_work(&priv->led_link_off,
+ LD_TIME_LINK_ON);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -939,8 +938,8 @@ static void ipw_led_link_off(struct ipw_priv *priv)
* turning the LED on (blink while unassociated) */
if (!(priv->status & STATUS_RF_KILL_MASK) &&
!(priv->status & STATUS_ASSOCIATED))
- queue_delayed_work(priv->workqueue, &priv->led_link_on,
- LD_TIME_LINK_OFF);
+ schedule_delayed_work(&priv->led_link_on,
+ LD_TIME_LINK_OFF);
}
@@ -980,13 +979,11 @@ static void __ipw_led_activity_on(struct ipw_priv *priv)
priv->status |= STATUS_LED_ACT_ON;
cancel_delayed_work(&priv->led_act_off);
- queue_delayed_work(priv->workqueue, &priv->led_act_off,
- LD_TIME_ACT_ON);
+ schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
} else {
/* Reschedule LED off for full time period */
cancel_delayed_work(&priv->led_act_off);
- queue_delayed_work(priv->workqueue, &priv->led_act_off,
- LD_TIME_ACT_ON);
+ schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
}
}
@@ -1795,13 +1792,11 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
if (disable_radio) {
priv->status |= STATUS_RF_KILL_SW;
- if (priv->workqueue) {
- cancel_delayed_work(&priv->request_scan);
- cancel_delayed_work(&priv->request_direct_scan);
- cancel_delayed_work(&priv->request_passive_scan);
- cancel_delayed_work(&priv->scan_event);
- }
- queue_work(priv->workqueue, &priv->down);
+ cancel_delayed_work(&priv->request_scan);
+ cancel_delayed_work(&priv->request_direct_scan);
+ cancel_delayed_work(&priv->request_passive_scan);
+ cancel_delayed_work(&priv->scan_event);
+ schedule_work(&priv->down);
} else {
priv->status &= ~STATUS_RF_KILL_SW;
if (rf_kill_active(priv)) {
@@ -1809,10 +1804,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
"disabled by HW switch\n");
/* Make sure the RF_KILL check timer is running */
cancel_delayed_work(&priv->rf_kill);
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- round_jiffies_relative(2 * HZ));
+ schedule_delayed_work(&priv->rf_kill,
+ round_jiffies_relative(2 * HZ));
} else
- queue_work(priv->workqueue, &priv->up);
+ schedule_work(&priv->up);
}
return 1;
@@ -2063,7 +2058,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
cancel_delayed_work(&priv->request_passive_scan);
cancel_delayed_work(&priv->scan_event);
schedule_work(&priv->link_down);
- queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
handled |= IPW_INTA_BIT_RF_KILL_DONE;
}
@@ -2103,7 +2098,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
priv->status &= ~STATUS_HCMD_ACTIVE;
wake_up_interruptible(&priv->wait_command_queue);
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
handled |= IPW_INTA_BIT_FATAL_ERROR;
}
@@ -2323,11 +2318,6 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
}
-/*
- * NOTE: This must be executed from our workqueue as it results in udelay
- * being called which may corrupt the keyboard if executed on default
- * workqueue
- */
static void ipw_adapter_restart(void *adapter)
{
struct ipw_priv *priv = adapter;
@@ -2368,13 +2358,13 @@ static void ipw_scan_check(void *data)
IPW_DEBUG_SCAN("Scan completion watchdog resetting "
"adapter after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
} else if (priv->status & STATUS_SCANNING) {
IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
"after (%dms).\n",
jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
ipw_abort_scan(priv);
- queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
+ schedule_delayed_work(&priv->scan_check, HZ);
}
}
@@ -3943,7 +3933,7 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
if (priv->status & STATUS_ASSOCIATING) {
IPW_DEBUG_ASSOC("Disassociating while associating.\n");
- queue_work(priv->workqueue, &priv->disassociate);
+ schedule_work(&priv->disassociate);
return;
}
@@ -4360,8 +4350,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
priv->quality = quality;
- queue_delayed_work(priv->workqueue, &priv->gather_stats,
- IPW_STATS_INTERVAL);
+ schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
}
static void ipw_bg_gather_stats(struct work_struct *work)
@@ -4396,10 +4385,10 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
IPW_DL_STATE,
"Aborting scan with missed beacon.\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ schedule_work(&priv->abort_scan);
}
- queue_work(priv->workqueue, &priv->disassociate);
+ schedule_work(&priv->disassociate);
return;
}
@@ -4425,8 +4414,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
if (!(priv->status & STATUS_ROAMING)) {
priv->status |= STATUS_ROAMING;
if (!(priv->status & STATUS_SCANNING))
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
}
return;
}
@@ -4439,7 +4427,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
* channels..) */
IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
"Aborting scan with missed beacon.\n");
- queue_work(priv->workqueue, &priv->abort_scan);
+ schedule_work(&priv->abort_scan);
}
IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
@@ -4462,8 +4450,8 @@ static void handle_scan_event(struct ipw_priv *priv)
/* Only userspace-requested scan completion events go out immediately */
if (!priv->user_requested_scan) {
if (!delayed_work_pending(&priv->scan_event))
- queue_delayed_work(priv->workqueue, &priv->scan_event,
- round_jiffies_relative(msecs_to_jiffies(4000)));
+ schedule_delayed_work(&priv->scan_event,
+ round_jiffies_relative(msecs_to_jiffies(4000)));
} else {
union iwreq_data wrqu;
@@ -4516,20 +4504,17 @@ static void ipw_rx_notification(struct ipw_priv *priv,
IPW_DEBUG_ASSOC
("queueing adhoc check\n");
- queue_delayed_work(priv->
- workqueue,
- &priv->
- adhoc_check,
- le16_to_cpu(priv->
- assoc_request.
- beacon_interval));
+ schedule_delayed_work(
+ &priv->adhoc_check,
+ le16_to_cpu(priv->
+ assoc_request.
+ beacon_interval));
break;
}
priv->status &= ~STATUS_ASSOCIATING;
priv->status |= STATUS_ASSOCIATED;
- queue_work(priv->workqueue,
- &priv->system_config);
+ schedule_work(&priv->system_config);
#ifdef CONFIG_IPW2200_QOS
#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
@@ -4792,43 +4777,37 @@ static void ipw_rx_notification(struct ipw_priv *priv,
#ifdef CONFIG_IPW2200_MONITOR
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
priv->status |= STATUS_SCAN_FORCED;
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
break;
}
priv->status &= ~STATUS_SCAN_FORCED;
#endif /* CONFIG_IPW2200_MONITOR */
/* Do queued direct scans first */
- if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
- queue_delayed_work(priv->workqueue,
- &priv->request_direct_scan, 0);
- }
+ if (priv->status & STATUS_DIRECT_SCAN_PENDING)
+ schedule_delayed_work(&priv->request_direct_scan, 0);
if (!(priv->status & (STATUS_ASSOCIATED |
STATUS_ASSOCIATING |
STATUS_ROAMING |
STATUS_DISASSOCIATING)))
- queue_work(priv->workqueue, &priv->associate);
+ schedule_work(&priv->associate);
else if (priv->status & STATUS_ROAMING) {
if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
/* If a scan completed and we are in roam mode, then
* the scan that completed was the one requested as a
* result of entering roam... so, schedule the
* roam work */
- queue_work(priv->workqueue,
- &priv->roam);
+ schedule_work(&priv->roam);
else
/* Don't schedule if we aborted the scan */
priv->status &= ~STATUS_ROAMING;
} else if (priv->status & STATUS_SCAN_PENDING)
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
else if (priv->config & CFG_BACKGROUND_SCAN
&& priv->status & STATUS_ASSOCIATED)
- queue_delayed_work(priv->workqueue,
- &priv->request_scan,
- round_jiffies_relative(HZ));
+ schedule_delayed_work(&priv->request_scan,
+ round_jiffies_relative(HZ));
/* Send an empty event to user space.
* We don't send the received data on the event because
@@ -5192,7 +5171,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv)
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
+ schedule_work(&priv->rx_replenish);
/* If we've added more space for the firmware to place data, tell it */
if (write != rxq->write)
@@ -6133,8 +6112,8 @@ static void ipw_adhoc_check(void *data)
return;
}
- queue_delayed_work(priv->workqueue, &priv->adhoc_check,
- le16_to_cpu(priv->assoc_request.beacon_interval));
+ schedule_delayed_work(&priv->adhoc_check,
+ le16_to_cpu(priv->assoc_request.beacon_interval));
}
static void ipw_bg_adhoc_check(struct work_struct *work)
@@ -6523,8 +6502,7 @@ send_request:
} else
priv->status &= ~STATUS_SCAN_PENDING;
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IPW_SCAN_CHECK_WATCHDOG);
+ schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
done:
mutex_unlock(&priv->mutex);
return err;
@@ -6994,8 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
!memcmp(network->ssid,
priv->assoc_network->ssid,
network->ssid_len)) {
- queue_work(priv->workqueue,
- &priv->merge_networks);
+ schedule_work(&priv->merge_networks);
}
}
@@ -7663,7 +7640,7 @@ static int ipw_associate(void *data)
if (priv->status & STATUS_DISASSOCIATING) {
IPW_DEBUG_ASSOC("Not attempting association (in "
"disassociating)\n ");
- queue_work(priv->workqueue, &priv->associate);
+ schedule_work(&priv->associate);
return 0;
}
@@ -7731,12 +7708,10 @@ static int ipw_associate(void *data)
if (!(priv->status & STATUS_SCANNING)) {
if (!(priv->config & CFG_SPEED_SCAN))
- queue_delayed_work(priv->workqueue,
- &priv->request_scan,
- SCAN_INTERVAL);
+ schedule_delayed_work(&priv->request_scan,
+ SCAN_INTERVAL);
else
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
}
return 0;
@@ -8899,7 +8874,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
priv->ieee->iw_mode = wrqu->mode;
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
mutex_unlock(&priv->mutex);
return err;
}
@@ -9598,7 +9573,7 @@ static int ipw_wx_set_scan(struct net_device *dev,
IPW_DEBUG_WX("Start scan\n");
- queue_delayed_work(priv->workqueue, work, 0);
+ schedule_delayed_work(work, 0);
return 0;
}
@@ -9937,7 +9912,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
#else
priv->net_dev->type = ARPHRD_IEEE80211;
#endif
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
}
ipw_set_channel(priv, parms[1]);
@@ -9947,7 +9922,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
return 0;
}
priv->net_dev->type = ARPHRD_ETHER;
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
}
mutex_unlock(&priv->mutex);
return 0;
@@ -9961,7 +9936,7 @@ static int ipw_wx_reset(struct net_device *dev,
{
struct ipw_priv *priv = libipw_priv(dev);
IPW_DEBUG_WX("RESET\n");
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
return 0;
}
@@ -10551,7 +10526,7 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p)
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
printk(KERN_INFO "%s: Setting MAC to %pM\n",
priv->net_dev->name, priv->mac_addr);
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
mutex_unlock(&priv->mutex);
return 0;
}
@@ -10684,9 +10659,7 @@ static void ipw_rf_kill(void *adapter)
if (rf_kill_active(priv)) {
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
- if (priv->workqueue)
- queue_delayed_work(priv->workqueue,
- &priv->rf_kill, 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
goto exit_unlock;
}
@@ -10697,7 +10670,7 @@ static void ipw_rf_kill(void *adapter)
"device\n");
/* we can not do an adapter restart while inside an irq lock */
- queue_work(priv->workqueue, &priv->adapter_restart);
+ schedule_work(&priv->adapter_restart);
} else
IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
"enabled\n");
@@ -10735,7 +10708,7 @@ static void ipw_link_up(struct ipw_priv *priv)
notify_wx_assoc_event(priv);
if (priv->config & CFG_BACKGROUND_SCAN)
- queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
+ schedule_delayed_work(&priv->request_scan, HZ);
}
static void ipw_bg_link_up(struct work_struct *work)
@@ -10764,7 +10737,7 @@ static void ipw_link_down(struct ipw_priv *priv)
if (!(priv->status & STATUS_EXIT_PENDING)) {
/* Queue up another scan... */
- queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
} else
cancel_delayed_work(&priv->scan_event);
}
@@ -10782,7 +10755,6 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
{
int ret = 0;
- priv->workqueue = create_workqueue(DRV_NAME);
init_waitqueue_head(&priv->wait_command_queue);
init_waitqueue_head(&priv->wait_state);
@@ -11339,8 +11311,7 @@ static int ipw_up(struct ipw_priv *priv)
IPW_WARNING("Radio Frequency Kill Switch is On:\n"
"Kill switch must be turned off for "
"wireless networking to work.\n");
- queue_delayed_work(priv->workqueue, &priv->rf_kill,
- 2 * HZ);
+ schedule_delayed_work(&priv->rf_kill, 2 * HZ);
return 0;
}
@@ -11350,8 +11321,7 @@ static int ipw_up(struct ipw_priv *priv)
/* If configure to try and auto-associate, kick
* off a scan. */
- queue_delayed_work(priv->workqueue,
- &priv->request_scan, 0);
+ schedule_delayed_work(&priv->request_scan, 0);
return 0;
}
@@ -11817,7 +11787,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
if (err) {
IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
- goto out_destroy_workqueue;
+ goto out_iounmap;
}
SET_NETDEV_DEV(net_dev, &pdev->dev);
@@ -11885,9 +11855,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
free_irq(pdev->irq, priv);
- out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
out_iounmap:
iounmap(priv->hw_base);
out_pci_release_regions:
@@ -11930,18 +11897,31 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
kfree(priv->cmdlog);
priv->cmdlog = NULL;
}
- /* ipw_down will ensure that there is no more pending work
- * in the workqueue's, so we can safely remove them now. */
- cancel_delayed_work(&priv->adhoc_check);
- cancel_delayed_work(&priv->gather_stats);
- cancel_delayed_work(&priv->request_scan);
- cancel_delayed_work(&priv->request_direct_scan);
- cancel_delayed_work(&priv->request_passive_scan);
- cancel_delayed_work(&priv->scan_event);
- cancel_delayed_work(&priv->rf_kill);
- cancel_delayed_work(&priv->scan_check);
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
+
+ /* make sure all work items are inactive */
+ cancel_delayed_work_sync(&priv->adhoc_check);
+ cancel_work_sync(&priv->associate);
+ cancel_work_sync(&priv->disassociate);
+ cancel_work_sync(&priv->system_config);
+ cancel_work_sync(&priv->rx_replenish);
+ cancel_work_sync(&priv->adapter_restart);
+ cancel_delayed_work_sync(&priv->rf_kill);
+ cancel_work_sync(&priv->up);
+ cancel_work_sync(&priv->down);
+ cancel_delayed_work_sync(&priv->request_scan);
+ cancel_delayed_work_sync(&priv->request_direct_scan);
+ cancel_delayed_work_sync(&priv->request_passive_scan);
+ cancel_delayed_work_sync(&priv->scan_event);
+ cancel_delayed_work_sync(&priv->gather_stats);
+ cancel_work_sync(&priv->abort_scan);
+ cancel_work_sync(&priv->roam);
+ cancel_delayed_work_sync(&priv->scan_check);
+ cancel_work_sync(&priv->link_up);
+ cancel_work_sync(&priv->link_down);
+ cancel_delayed_work_sync(&priv->led_link_on);
+ cancel_delayed_work_sync(&priv->led_link_off);
+ cancel_delayed_work_sync(&priv->led_act_off);
+ cancel_work_sync(&priv->merge_networks);
/* Free MAC hash list for ADHOC */
for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
@@ -12029,7 +12009,7 @@ static int ipw_pci_resume(struct pci_dev *pdev)
priv->suspend_time = get_seconds() - priv->suspend_at;
/* Bring the device back up */
- queue_work(priv->workqueue, &priv->up);
+ schedule_work(&priv->up);
return 0;
}
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index d7d049c..0441445 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -1299,8 +1299,6 @@ struct ipw_priv {
u8 direct_scan_ssid[IW_ESSID_MAX_SIZE];
u8 direct_scan_ssid_len;
- struct workqueue_struct *workqueue;
-
struct delayed_work adhoc_check;
struct work_struct associate;
struct work_struct disassociate;
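As an aside, the ipw2200 hunks above all apply one conversion: work items move from the driver-private workqueue onto the shared system workqueue, and teardown waits for each item explicitly instead of destroying a private queue. A minimal sketch of that pattern on a hypothetical driver (foo_priv and the foo_* helpers are made up, not part of the patch):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct foo_priv {
	struct work_struct restart;
	struct delayed_work poll;
};

static void foo_restart_work(struct work_struct *work) { /* ... */ }
static void foo_poll_work(struct work_struct *work) { /* ... */ }

static void foo_init(struct foo_priv *priv)
{
	INIT_WORK(&priv->restart, foo_restart_work);
	INIT_DELAYED_WORK(&priv->poll, foo_poll_work);
}

static void foo_kick(struct foo_priv *priv)
{
	/* was: queue_work(priv->workqueue, &priv->restart); */
	schedule_work(&priv->restart);
	/* was: queue_delayed_work(priv->workqueue, &priv->poll, HZ); */
	schedule_delayed_work(&priv->poll, HZ);
}

static void foo_remove(struct foo_priv *priv)
{
	/* was: destroy_workqueue(priv->workqueue); wait for each item instead */
	cancel_work_sync(&priv->restart);
	cancel_delayed_work_sync(&priv->poll);
}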
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a9b852b..39b6f16 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
}
#endif
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt)
-{
- bool rc = true;
- struct iwl3945_notif_statistics current_stat;
- int combined_plcp_delta;
- unsigned int plcp_msec;
- unsigned long plcp_received_jiffies;
-
- if (priv->cfg->base_params->plcp_delta_threshold ==
- IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
- IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
- return rc;
- }
- memcpy(&current_stat, pkt->u.raw, sizeof(struct
- iwl3945_notif_statistics));
- /*
- * check for plcp_err and trigger radio reset if it exceeds
- * the plcp error threshold plcp_delta.
- */
- plcp_received_jiffies = jiffies;
- plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
- (long) priv->plcp_jiffies);
- priv->plcp_jiffies = plcp_received_jiffies;
- /*
- * check to make sure plcp_msec is not 0 to prevent division
- * by zero.
- */
- if (plcp_msec) {
- combined_plcp_delta =
- (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
- le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
- if ((combined_plcp_delta > 0) &&
- ((combined_plcp_delta * 100) / plcp_msec) >
- priv->cfg->base_params->plcp_delta_threshold) {
- /*
- * if plcp_err exceed the threshold, the following
- * data is printed in csv format:
- * Text: plcp_err exceeded %d,
- * Received ofdm.plcp_err,
- * Current ofdm.plcp_err,
- * combined_plcp_delta,
- * plcp_msec
- */
- IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
- "%u, %d, %u mSecs\n",
- priv->cfg->base_params->plcp_delta_threshold,
- le32_to_cpu(current_stat.rx.ofdm.plcp_err),
- combined_plcp_delta, plcp_msec);
- /*
- * Reset the RF radio due to the high plcp
- * error rate
- */
- rc = false;
- }
- }
- return rc;
-}
-
void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = {
.isr_ops = {
.isr = iwl_isr_legacy,
},
- .check_plcp_health = iwl3945_good_plcp_health,
.debugfs_ops = {
.rx_stats_read = iwl3945_ucode_rx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 79ab0a6..537fb8c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
#include "iwl-agn-debugfs.h"
/* Highest firmware API version supported */
-#define IWL5000_UCODE_API_MAX 2
+#define IWL5000_UCODE_API_MAX 5
#define IWL5150_UCODE_API_MAX 2
/* Lowest firmware API version supported */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4..0494d7b 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
while (i != idx) {
u16 len;
struct sk_buff *skb;
+ dma_addr_t dma_addr;
desc = &ring[i];
len = le16_to_cpu(desc->len);
skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
len = priv->common.rx_mtu;
}
+ dma_addr = le32_to_cpu(desc->host_addr);
+ pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
skb_put(skb, len);
if (p54_rx(dev, skb)) {
- pci_unmap_single(priv->pdev,
- le32_to_cpu(desc->host_addr),
- priv->common.rx_mtu + 32,
- PCI_DMA_FROMDEVICE);
+ pci_unmap_single(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
rx_buf[i] = NULL;
- desc->host_addr = 0;
+ desc->host_addr = cpu_to_le32(0);
} else {
skb_trim(skb, 0);
+ pci_dma_sync_single_for_device(priv->pdev, dma_addr,
+ priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
}
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 21713a7..9b344a9 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
{USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
+ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
{USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
{USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 848cc2c..518542b 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
__le32 mode;
int ret;
+ if (priv->device_type != RNDIS_BCM4320B)
+ return -ENOTSUPP;
+
netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
enabled ? "enabled" : "disabled",
timeout);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index aa97971..3b3f1e4 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
#endif
#ifdef CONFIG_RT2800PCI_RT35XX
+ { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index b97a4a5..197a36c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
*/
rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+ /*
+ * The hardware has already checked the Michael Mic and has
+ * stripped it from the frame. Signal this to mac80211.
+ */
+ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
rxdesc->flags |= RX_FLAG_DECRYPTED;
else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ffedfd4..ea15800 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -3,7 +3,7 @@
#
menuconfig NFC_DEVICES
- bool "NFC devices"
+ bool "Near Field Communication (NFC) devices"
default n
---help---
You'll have to say Y if your computer contains an NFC device that
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index bae6472..724f65d 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -60,7 +60,7 @@ enum pn544_irq {
struct pn544_info {
struct miscdevice miscdev;
struct i2c_client *i2c_dev;
- struct regulator_bulk_data regs[2];
+ struct regulator_bulk_data regs[3];
enum pn544_state state;
wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
static const char reg_vdd_io[] = "Vdd_IO";
static const char reg_vbat[] = "VBat";
+static const char reg_vsim[] = "VSim";
/* sysfs interface */
static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
info->regs[0].supply = reg_vdd_io;
info->regs[1].supply = reg_vbat;
+ info->regs[2].supply = reg_vsim;
r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
info->regs);
if (r < 0)
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 3c6e100..d06a637 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -69,4 +69,10 @@ config OF_MDIO
help
OpenFirmware MDIO bus (Ethernet PHY) accessors
+config OF_PCI
+ def_tristate PCI
+ depends on PCI && (PPC || MICROBLAZE || X86)
+ help
+ OpenFirmware PCI bus accessors
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 3ab21a0..f7861ed 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_OF_I2C) += of_i2c.o
obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_SPI) += of_spi.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
+obj-$(CONFIG_OF_PCI) += of_pci.o
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
new file mode 100644
index 0000000..ac1ec54
--- /dev/null
+++ b/drivers/of/of_pci.c
@@ -0,0 +1,92 @@
+#include <linux/kernel.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <asm/prom.h>
+
+/**
+ * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * @pdev: the device whose interrupt is to be resolved
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves the PCI interrupt for a given PCI device. If a
+ * device-node exists for a given pci_dev, it will use normal OF tree
+ * walking. If not, it will implement standard swizzling and walk up the
+ * PCI tree until a device-node is found, at which point it will finish
+ * resolving using the OF tree walking.
+ */
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+{
+ struct device_node *dn, *ppnode;
+ struct pci_dev *ppdev;
+ u32 lspec;
+ __be32 lspec_be;
+ __be32 laddr[3];
+ u8 pin;
+ int rc;
+
+ /* Check if we have a device node; if yes, fall back to standard
+ * device tree parsing
+ */
+ dn = pci_device_to_OF_node(pdev);
+ if (dn) {
+ rc = of_irq_map_one(dn, 0, out_irq);
+ if (!rc)
+ return rc;
+ }
+
+ /* Ok, we don't, time to have fun. Let's start by building up an
+ * interrupt spec. We assume #interrupt-cells is 1, which is standard
+ * for PCI. If yours is different, don't use this routine.
+ */
+ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (rc != 0)
+ return rc;
+ /* No pin, exit */
+ if (pin == 0)
+ return -ENODEV;
+
+ /* Now we walk up the PCI tree */
+ lspec = pin;
+ for (;;) {
+ /* Get the pci_dev of our parent */
+ ppdev = pdev->bus->self;
+
+ /* Ouch, it's a host bridge... */
+ if (ppdev == NULL) {
+ ppnode = pci_bus_to_OF_node(pdev->bus);
+
+ /* No node for host bridge ? give up */
+ if (ppnode == NULL)
+ return -EINVAL;
+ } else {
+ /* We found a P2P bridge, check if it has a node */
+ ppnode = pci_device_to_OF_node(ppdev);
+ }
+
+ /* Ok, we have found a parent with a device-node, hand over to
+ * the OF parsing code.
+ * We build a unit address from the linux device to be used for
+ * resolution. Note that we use the linux bus number which may
+ * not match your firmware bus numbering.
+ * Fortunately, in most cases, interrupt-map-mask doesn't
+ * include the bus number as part of the matching.
+ * You should still be careful about that though if you intend
+ * to rely on this function (i.e. if your firmware doesn't
+ * create device nodes for all PCI devices).
+ */
+ if (ppnode)
+ break;
+
+ /* We can only get here if we hit a P2P bridge with no node,
+ * let's do standard swizzling and try again
+ */
+ lspec = pci_swizzle_interrupt_pin(pdev, lspec);
+ pdev = ppdev;
+ }
+
+ lspec_be = cpu_to_be32(lspec);
+ laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
+ laddr[1] = laddr[2] = cpu_to_be32(0);
+ return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
+}
+EXPORT_SYMBOL_GPL(of_irq_map_pci);
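For reference, a sketch of how architecture code would typically consume this helper, roughly the powerpc usage of the era. The wrapper name and error handling are illustrative, and the irq_create_of_mapping() declaration is assumed to come from <linux/of_irq.h> on the architectures that provide it:

#include <linux/pci.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>

static int example_map_pci_irq(struct pci_dev *pdev)
{
	struct of_irq oirq;
	unsigned int virq;

	if (of_irq_map_pci(pdev, &oirq))
		return -ENODEV;		/* no usable interrupt description */

	/* turn the controller node + specifier into a Linux irq number */
	virq = irq_create_of_mapping(oirq.controller, oirq.specifier, oirq.size);
	if (!virq)
		return -ENODEV;

	pdev->irq = virq;
	return 0;
}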
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 28295d0..4d87b5d 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
(p)->unique_id = of_pdt_unique_id++; \
} while (0)
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
{
- return dp->path_component_name;
+ int len, ourlen, plen;
+ char *n;
+
+ dp->path_component_name = build_path_component(dp);
+
+ plen = strlen(dp->parent->full_name);
+ ourlen = strlen(dp->path_component_name);
+ len = ourlen + plen + 2;
+
+ n = prom_early_alloc(len);
+ strcpy(n, dp->parent->full_name);
+ if (!of_node_is_root(dp->parent)) {
+ strcpy(n + plen, "/");
+ plen++;
+ }
+ strcpy(n + plen, dp->path_component_name);
+
+ return n;
}
-#else
+#else /* CONFIG_SPARC */
static inline void of_pdt_incr_unique_id(void *p) { }
static inline void irq_trans_init(struct device_node *dp) { }
-static inline const char *of_pdt_node_name(struct device_node *dp)
+static char * __init of_pdt_build_full_name(struct device_node *dp)
{
- return dp->name;
+ static int failsafe_id = 0; /* for generating unique names on failure */
+ char *buf;
+ int len;
+
+ if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
+ goto failsafe;
+
+ buf = prom_early_alloc(len + 1);
+ if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
+ goto failsafe;
+ return buf;
+
+ failsafe:
+ buf = prom_early_alloc(strlen(dp->parent->full_name) +
+ strlen(dp->name) + 16);
+ sprintf(buf, "%s/%s@unknown%i",
+ of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
+ dp->name, failsafe_id++);
+ pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
+ return buf;
}
#endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
return buf;
}
-static char * __init of_pdt_try_pkg2path(phandle node)
-{
- char *res, *buf = NULL;
- int len;
-
- if (!of_pdt_prom_ops->pkg2path)
- return NULL;
-
- if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
- return NULL;
- buf = prom_early_alloc(len + 1);
- if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
- pr_err("%s: package-to-path failed\n", __func__);
- return NULL;
- }
-
- res = strrchr(buf, '/');
- if (!res) {
- pr_err("%s: couldn't find / in %s\n", __func__, buf);
- return NULL;
- }
- return res+1;
-}
-
-/*
- * When fetching the node's name, first try using package-to-path; if
- * that fails (either because the arch hasn't supplied a PROM callback,
- * or some other random failure), fall back to just looking at the node's
- * 'name' property.
- */
-static char * __init of_pdt_build_name(phandle node)
-{
- char *buf;
-
- buf = of_pdt_try_pkg2path(node);
- if (!buf)
- buf = of_pdt_get_one_property(node, "name");
-
- return buf;
-}
-
static struct device_node * __init of_pdt_create_node(phandle node,
struct device_node *parent)
{
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
kref_init(&dp->kref);
- dp->name = of_pdt_build_name(node);
+ dp->name = of_pdt_get_one_property(node, "name");
dp->type = of_pdt_get_one_property(node, "device_type");
dp->phandle = node;
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
return dp;
}
-static char * __init of_pdt_build_full_name(struct device_node *dp)
-{
- int len, ourlen, plen;
- char *n;
-
- plen = strlen(dp->parent->full_name);
- ourlen = strlen(of_pdt_node_name(dp));
- len = ourlen + plen + 2;
-
- n = prom_early_alloc(len);
- strcpy(n, dp->parent->full_name);
- if (!of_node_is_root(dp->parent)) {
- strcpy(n + plen, "/");
- plen++;
- }
- strcpy(n + plen, of_pdt_node_name(dp));
-
- return n;
-}
-
static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
phandle node,
struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
*(*nextp) = dp;
*nextp = &dp->allnext;
-#if defined(CONFIG_SPARC)
- dp->path_component_name = build_path_component(dp);
-#endif
dp->full_name = of_pdt_build_full_name(dp);
dp->child = of_pdt_build_tree(dp,
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 88246dd..d86ea8b 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -431,7 +431,7 @@ static void pci_device_shutdown(struct device *dev)
pci_msix_shutdown(pci_dev);
}
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
/* Auxiliary functions used for system resume and run-time resume. */
@@ -1059,7 +1059,7 @@ static int pci_pm_runtime_idle(struct device *dev)
#endif /* !CONFIG_PM_RUNTIME */
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
const struct dev_pm_ops pci_dev_pm_ops = {
.prepare = pci_pm_prepare,
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 3a5a6fc..492b7d8 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -243,7 +243,7 @@ struct pci_ops pcifront_bus_ops = {
#ifdef CONFIG_PCI_MSI
static int pci_frontend_enable_msix(struct pci_dev *dev,
- int **vector, int nvec)
+ int vector[], int nvec)
{
int err;
int i;
@@ -277,18 +277,24 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
if (likely(!err)) {
if (likely(!op.value)) {
/* we get the result */
- for (i = 0; i < nvec; i++)
- *(*vector+i) = op.msix_entries[i].vector;
- return 0;
+ for (i = 0; i < nvec; i++) {
+ if (op.msix_entries[i].vector <= 0) {
+ dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n",
+ i, op.msix_entries[i].vector);
+ err = -EINVAL;
+ vector[i] = -1;
+ continue;
+ }
+ vector[i] = op.msix_entries[i].vector;
+ }
} else {
printk(KERN_DEBUG "enable msix get value %x\n",
op.value);
- return op.value;
}
} else {
dev_err(&dev->dev, "enable msix get err %x\n", err);
- return err;
}
+ return err;
}
static void pci_frontend_disable_msix(struct pci_dev *dev)
@@ -310,7 +316,7 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
dev_err(&dev->dev, "pci_disable_msix get err %x\n", err);
}
-static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
+static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
{
int err;
struct xen_pci_op op = {
@@ -324,7 +330,13 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
err = do_pci_op(pdev, &op);
if (likely(!err)) {
- *(*vector) = op.value;
+ vector[0] = op.value;
+ if (op.value <= 0) {
+ dev_warn(&dev->dev, "MSI entry is invalid: %d!\n",
+ op.value);
+ err = -EINVAL;
+ vector[0] = -1;
+ }
} else {
dev_err(&dev->dev, "pci frontend enable msi failed for dev "
"%x:%x\n", op.bus, op.devfn);
@@ -733,8 +745,7 @@ static void free_pdev(struct pcifront_device *pdev)
pcifront_free_roots(pdev);
- /*For PCIE_AER error handling job*/
- flush_scheduled_work();
+ cancel_work_sync(&pdev->op_work);
if (pdev->irq >= 0)
unbind_from_irqhandler(pdev->irq, pdev);
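The MSI/MSI-X hunks above change the calling convention from an int ** to a flat int array, mark rejected entries with -1, and propagate -EINVAL to the caller. A toy restatement of that contract (the toy_* names are invented, not pcifront code):

#include <linux/errno.h>

/* hypothetical backend query: returns the vector for entry i, or <= 0 on error */
int toy_backend_vector(int i);

static int toy_enable_msix(int vector[], int nvec)
{
	int i, err = 0;

	for (i = 0; i < nvec; i++) {
		int v = toy_backend_vector(i);

		if (v <= 0) {
			vector[i] = -1;		/* sentinel: caller must not use it */
			err = -EINVAL;
			continue;
		}
		vector[i] = v;
	}
	return err;
}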
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0bdda5b3..42fbf1a 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
flags |= CONF_ENABLE_IOCARD;
if (flags & CONF_ENABLE_IOCARD)
s->socket.flags |= SS_IOCARD;
+ if (flags & CONF_ENABLE_ZVCARD)
+ s->socket.flags |= SS_ZVCARD | SS_IOCARD;
if (flags & CONF_ENABLE_SPKR) {
s->socket.flags |= SS_SPKR_ENA;
status = CCSR_AUDIO_ENA;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3755e7c..2c54054 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
}
#endif
-static void pxa2xx_configure_sockets(struct device *dev)
+void pxa2xx_configure_sockets(struct device *dev)
{
struct pcmcia_low_level *ops = dev->platform_data;
/*
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index bb62ea8..b609b45 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,4 @@
int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
+void pxa2xx_configure_sockets(struct device *dev);
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
index c3f7219..a520395 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/drivers/pcmcia/pxa2xx_colibri.c
@@ -181,6 +181,9 @@ static int __init colibri_pcmcia_init(void)
{
int ret;
+ if (!machine_is_colibri() && !machine_is_colibri320())
+ return -ENODEV;
+
colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
if (!colibri_pcmcia_device)
return -ENOMEM;
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index b9f8c8f..25afe63 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
+ pxa2xx_configure_sockets(&sadev->dev);
ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
pxa2xx_drv_pcmcia_add_one);
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d163bc2..a59af5b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -227,7 +227,7 @@ config SONYPI_COMPAT
config IDEAPAD_LAPTOP
tristate "Lenovo IdeaPad Laptop Extras"
depends on ACPI
- depends on RFKILL
+ depends on RFKILL && INPUT
select INPUT_SPARSEKMAP
help
This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c5c4b8c..38b34a7 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -84,7 +84,7 @@ MODULE_LICENSE("GPL");
*/
#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
-#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
+#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
@@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev,
return -EINVAL;
return count;
}
-static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg,
+static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
set_bool_threeg);
static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 4633fd8..fe49593 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
struct proc_dir_entry *proc;
mode_t mode;
- /*
- * If parameter uid or gid is not changed, keep the default setting for
- * our proc entries (-rw-rw-rw-) else, it means we care about security,
- * and then set to -rw-rw----
- */
-
if ((asus_uid == 0) && (asus_gid == 0)) {
- mode = S_IFREG | S_IRUGO | S_IWUGO;
+ mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
} else {
mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
printk(KERN_WARNING " asus_uid and asus_gid parameters are "
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 34657f9..ad24ef3 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked)
dell_send_request(buffer, 17, 11);
/* If the hardware switch controls this radio, and the hardware
- switch is disabled, don't allow changing the software state */
+ switch is disabled, don't allow changing the software state.
+ If the hardware switch is reported as not supported, always
+ fire the SMI to toggle the killswitch. */
if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16))) {
+ !(buffer->output[1] & BIT(16)) &&
+ (buffer->output[1] & BIT(0))) {
ret = -EINVAL;
goto out;
}
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
+ int status;
+
+ get_buffer();
+ dell_send_request(buffer, 17, 11);
+ status = buffer->output[1];
+ release_buffer();
+
+ /* if hardware rfkill is not supported, set it explicitly */
+ if (!(status & BIT(0))) {
+ if (wifi_rfkill)
+ dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
+ if (bluetooth_rfkill)
+ dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
+ if (wwan_rfkill)
+ dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
+ }
+
if (wifi_rfkill)
dell_rfkill_query(wifi_rfkill, (void *)1);
if (bluetooth_rfkill)
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 930e627..61433d4 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -60,69 +60,20 @@ enum pmic_gpio_register {
#define GPOSW_DOU 0x08
#define GPOSW_RDRV 0x30
+#define GPIO_UPDATE_TYPE 0x80000000
#define NUM_GPIO 24
-struct pmic_gpio_irq {
- spinlock_t lock;
- u32 trigger[NUM_GPIO];
- u32 dirty;
- struct work_struct work;
-};
-
-
struct pmic_gpio {
+ struct mutex buslock;
struct gpio_chip chip;
- struct pmic_gpio_irq irqtypes;
void *gpiointr;
int irq;
unsigned irq_base;
+ unsigned int update_type;
+ u32 trigger_type;
};
-static void pmic_program_irqtype(int gpio, int type)
-{
- if (type & IRQ_TYPE_EDGE_RISING)
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
- else
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
-
- if (type & IRQ_TYPE_EDGE_FALLING)
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
- else
- intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
-};
-
-static void pmic_irqtype_work(struct work_struct *work)
-{
- struct pmic_gpio_irq *t =
- container_of(work, struct pmic_gpio_irq, work);
- unsigned long flags;
- int i;
- u16 type;
-
- spin_lock_irqsave(&t->lock, flags);
- /* As we drop the lock, we may need multiple scans if we race the
- pmic_irq_type function */
- while (t->dirty) {
- /*
- * For each pin that has the dirty bit set send an IPC
- * message to configure the hardware via the PMIC
- */
- for (i = 0; i < NUM_GPIO; i++) {
- if (!(t->dirty & (1 << i)))
- continue;
- t->dirty &= ~(1 << i);
- /* We can't trust the array entry or dirty
- once the lock is dropped */
- type = t->trigger[i];
- spin_unlock_irqrestore(&t->lock, flags);
- pmic_program_irqtype(i, type);
- spin_lock_irqsave(&t->lock, flags);
- }
- }
- spin_unlock_irqrestore(&t->lock, flags);
-}
-
static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
if (offset > 8) {
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1 << (offset - 16));
}
-static int pmic_irq_type(unsigned irq, unsigned type)
+/*
+ * This is called from genirq with pg->buslock locked and
+ * irq_desc->lock held. We cannot access the SCU bus here, so we
+ * store the change and do the update in the bus_sync_unlock() function below.
+ */
+static int pmic_irq_type(struct irq_data *data, unsigned type)
{
- struct pmic_gpio *pg = get_irq_chip_data(irq);
- u32 gpio = irq - pg->irq_base;
- unsigned long flags;
+ struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
+ u32 gpio = data->irq - pg->irq_base;
if (gpio >= pg->chip.ngpio)
return -EINVAL;
- spin_lock_irqsave(&pg->irqtypes.lock, flags);
- pg->irqtypes.trigger[gpio] = type;
- pg->irqtypes.dirty |= (1 << gpio);
- spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
- schedule_work(&pg->irqtypes.work);
+ pg->trigger_type = type;
+ pg->update_type = gpio | GPIO_UPDATE_TYPE;
return 0;
}
-
-
static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
@@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
}
/* the gpiointr register is read-clear, so just do nothing. */
-static void pmic_irq_unmask(unsigned irq)
-{
-};
+static void pmic_irq_unmask(struct irq_data *data) { }
-static void pmic_irq_mask(unsigned irq)
-{
-};
+static void pmic_irq_mask(struct irq_data *data) { }
static struct irq_chip pmic_irqchip = {
.name = "PMIC-GPIO",
- .mask = pmic_irq_mask,
- .unmask = pmic_irq_unmask,
- .set_type = pmic_irq_type,
+ .irq_mask = pmic_irq_mask,
+ .irq_unmask = pmic_irq_unmask,
+ .irq_set_type = pmic_irq_type,
};
-static void pmic_irq_handler(unsigned irq, struct irq_desc *desc)
+static irqreturn_t pmic_irq_handler(int irq, void *data)
{
- struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq);
+ struct pmic_gpio *pg = data;
u8 intsts = *((u8 *)pg->gpiointr + 4);
int gpio;
+ irqreturn_t ret = IRQ_NONE;
for (gpio = 0; gpio < 8; gpio++) {
if (intsts & (1 << gpio)) {
pr_debug("pmic pin %d triggered\n", gpio);
generic_handle_irq(pg->irq_base + gpio);
+ ret = IRQ_HANDLED;
}
}
-
- if (desc->chip->irq_eoi)
- desc->chip->irq_eoi(irq_get_irq_data(irq));
- else
- dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq);
+ return ret;
}
static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
@@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
pg->chip.can_sleep = 1;
pg->chip.dev = dev;
- INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work);
- spin_lock_init(&pg->irqtypes.lock);
+ mutex_init(&pg->buslock);
pg->chip.dev = dev;
retval = gpiochip_add(&pg->chip);
@@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
goto err;
}
- set_irq_data(pg->irq, pg);
- set_irq_chained_handler(pg->irq, pmic_irq_handler);
+
+ retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
+ if (retval) {
+ printk(KERN_WARNING "pmic: Interrupt request failed\n");
+ goto err;
+ }
+
for (i = 0; i < 8; i++) {
set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
handle_simple_irq, "demux");
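The intel_pmic_gpio rework above is an instance of the generic "slow bus" irq_chip pattern: irq_set_type() only records the request under the bus lock, and the actual (sleeping) bus write happens in irq_bus_sync_unlock(). A condensed, hypothetical sketch of that shape (baz_* names are invented):

#include <linux/irq.h>
#include <linux/mutex.h>

/* hypothetical slow (sleeping) bus write */
void baz_write_trigger(unsigned int gpio, unsigned int type);

struct baz_chip {
	struct mutex buslock;
	unsigned int irq_base;
	unsigned int pending_gpio;
	unsigned int pending_type;	/* 0 means nothing to flush */
};

static void baz_bus_lock(struct irq_data *d)
{
	struct baz_chip *bc = irq_data_get_irq_chip_data(d);

	mutex_lock(&bc->buslock);
}

static void baz_bus_sync_unlock(struct irq_data *d)
{
	struct baz_chip *bc = irq_data_get_irq_chip_data(d);

	if (bc->pending_type)
		baz_write_trigger(bc->pending_gpio, bc->pending_type);
	bc->pending_type = 0;
	mutex_unlock(&bc->buslock);
}

static int baz_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct baz_chip *bc = irq_data_get_irq_chip_data(d);

	/* no bus access here: just remember what to program later */
	bc->pending_gpio = d->irq - bc->irq_base;
	bc->pending_type = type;
	return 0;
}

static struct irq_chip baz_irqchip = {
	.name			= "baz",
	.irq_bus_lock		= baz_bus_lock,
	.irq_bus_sync_unlock	= baz_bus_sync_unlock,
	.irq_set_type		= baz_irq_set_type,
};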
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 1fe0f1f..865ef78 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \
return -EINVAL; \
return count; \
} \
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
show_bool_##value, set_bool_##value);
show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index dd59958..eb99223 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode)
if (keycode != KEY_RESERVED) {
mutex_lock(&tpacpi_inputdev_send_mutex);
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
input_report_key(tpacpi_inputdev, keycode, 1);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
input_sync(tpacpi_inputdev);
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
input_report_key(tpacpi_inputdev, keycode, 0);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
input_sync(tpacpi_inputdev);
mutex_unlock(&tpacpi_inputdev_send_mutex);
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index f3a73dd..e4c4f3d 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -6,7 +6,7 @@ comment "PPS generators support"
config PPS_GENERATOR_PARPORT
tristate "Parallel port PPS signal generator"
- depends on PARPORT
+ depends on PARPORT && BROKEN
help
If you say yes here you get support for a PPS signal generator which
utilizes STROBE pin of a parallel port to send PPS signals. It uses
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cba1b43..a4e8eb9 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
{
unsigned long flags;
int captured = 0;
- struct pps_ktime ts_real;
+ struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
/* check event type */
BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 76b4185..1269fbd 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
/* Several chips lock up trying to read undefined config space */
if (capable(CAP_SYS_ADMIN))
- size = 0x200000;
+ size = RIO_MAINT_SPACE_SZ;
- if (off > size)
+ if (off >= size)
return 0;
if (off + count > size) {
size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
loff_t init_off = off;
u8 *data = (u8 *) buf;
- if (off > 0x200000)
+ if (off >= RIO_MAINT_SPACE_SZ)
return 0;
- if (off + count > 0x200000) {
- size = 0x200000 - off;
+ if (off + count > RIO_MAINT_SPACE_SZ) {
+ size = RIO_MAINT_SPACE_SZ - off;
count = size;
}
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
- .size = 0x200000,
+ .size = RIO_MAINT_SPACE_SZ,
.read = rio_read_config,
.write = rio_write_config,
};
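Besides replacing the 0x200000 magic number with RIO_MAINT_SPACE_SZ, the rio-sysfs hunks tighten the bounds test from off > size to off >= size while still clamping partial accesses. The generic shape of that check, as a standalone sketch (the actual copy step is elided):

#include <linux/types.h>

/* how many bytes of a `space`-byte window may be accessed starting at `off` */
static ssize_t clamp_window_access(loff_t off, size_t count, size_t space)
{
	if (off >= (loff_t)space)
		return 0;			/* at or past the end: nothing to do */
	if (off + count > space)
		count = space - off;		/* partial access at the tail */
	return count;
}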
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index f53d31b..2bb5de1 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
- BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages);
+ BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
return mc13xxx_regulators[id].voltages[val];
}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 8b0d2c4..06df898 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_IDLE;
default:
BUG();
+ return -EINVAL;
}
}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cdd9719..4941cad 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -97,6 +97,18 @@ config RTC_INTF_DEV
If unsure, say Y.
+config RTC_INTF_DEV_UIE_EMUL
+ bool "RTC UIE emulation on dev interface"
+ depends on RTC_INTF_DEV
+ help
+ Provides an emulation for RTC_UIE if the underlying rtc chip
+ driver does not expose RTC_UIE ioctls. Those requests generate
+ once-per-second update interrupts, used for synchronization.
+
+ The emulation code will read the time from the hardware
+ clock several times per second; please enable this option
+ only if you know that you really need it.
+
config RTC_DRV_TEST
tristate "Test driver/device"
help
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index c404b61..09b4437 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -117,6 +117,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
struct module *owner)
{
struct rtc_device *rtc;
+ struct rtc_wkalrm alrm;
int id, err;
if (idr_pre_get(&rtc_idr, GFP_KERNEL) == 0) {
@@ -166,6 +167,12 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc->pie_timer.function = rtc_pie_update_irq;
rtc->pie_enabled = 0;
+ /* Check to see if there is an ALARM already set in hw */
+ err = __rtc_read_alarm(rtc, &alrm);
+
+ if (!err && !rtc_valid_tm(&alrm.time))
+ rtc_set_alarm(rtc, &alrm);
+
strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
dev_set_name(&rtc->dev, "rtc%d", id);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c0196..8ec6b06 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -116,6 +116,186 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);
+static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&rtc->ops_lock);
+ if (err)
+ return err;
+
+ if (rtc->ops == NULL)
+ err = -ENODEV;
+ else if (!rtc->ops->read_alarm)
+ err = -EINVAL;
+ else {
+ memset(alarm, 0, sizeof(struct rtc_wkalrm));
+ err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
+ }
+
+ mutex_unlock(&rtc->ops_lock);
+ return err;
+}
+
+int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+ int err;
+ struct rtc_time before, now;
+ int first_time = 1;
+ unsigned long t_now, t_alm;
+ enum { none, day, month, year } missing = none;
+ unsigned days;
+
+ /* The lower level RTC driver may return -1 in some fields,
+ * creating invalid alarm->time values, for reasons like:
+ *
+ * - The hardware may not be capable of filling them in;
+ * many alarms match only on time-of-day fields, not
+ * day/month/year calendar data.
+ *
+ * - Some hardware uses illegal values as "wildcard" match
+ * values, which non-Linux firmware (like a BIOS) may try
+ * to set up as e.g. "alarm 15 minutes after each hour".
+ * Linux uses only oneshot alarms.
+ *
+ * When we see that here, we deal with it by using values from
+ * a current RTC timestamp for any missing (-1) values. The
+ * RTC driver prevents "periodic alarm" modes.
+ *
+ * But this can be racy, because some fields of the RTC timestamp
+ * may have wrapped in the interval since we read the RTC alarm,
+ * which would lead to us inserting inconsistent values in place
+ * of the -1 fields.
+ *
+ * Reading the alarm and timestamp in the reverse sequence
+ * would have the same race condition, and not solve the issue.
+ *
+ * So, we must first read the RTC timestamp,
+ * then read the RTC alarm value,
+ * and then read a second RTC timestamp.
+ *
+ * If any fields of the second timestamp have changed
+ * when compared with the first timestamp, then we know
+ * our timestamp may be inconsistent with that used by
+ * the low-level rtc_read_alarm_internal() function.
+ *
+ * So, when the two timestamps disagree, we just loop and do
+ * the process again to get a fully consistent set of values.
+ *
+ * This could all instead be done in the lower level driver,
+ * but since more than one lower level RTC implementation needs it,
+ * it's probably best to do it here instead of there.
+ */
+
+ /* Get the "before" timestamp */
+ err = rtc_read_time(rtc, &before);
+ if (err < 0)
+ return err;
+ do {
+ if (!first_time)
+ memcpy(&before, &now, sizeof(struct rtc_time));
+ first_time = 0;
+
+ /* get the RTC alarm values, which may be incomplete */
+ err = rtc_read_alarm_internal(rtc, alarm);
+ if (err)
+ return err;
+
+ /* full-function RTCs won't have such missing fields */
+ if (rtc_valid_tm(&alarm->time) == 0)
+ return 0;
+
+ /* get the "after" timestamp, to detect wrapped fields */
+ err = rtc_read_time(rtc, &now);
+ if (err < 0)
+ return err;
+
+ /* note that tm_sec is a "don't care" value here: */
+ } while ( before.tm_min != now.tm_min
+ || before.tm_hour != now.tm_hour
+ || before.tm_mon != now.tm_mon
+ || before.tm_year != now.tm_year);
+
+ /* Fill in the missing alarm fields using the timestamp; we
+ * know there's at least one since alarm->time is invalid.
+ */
+ if (alarm->time.tm_sec == -1)
+ alarm->time.tm_sec = now.tm_sec;
+ if (alarm->time.tm_min == -1)
+ alarm->time.tm_min = now.tm_min;
+ if (alarm->time.tm_hour == -1)
+ alarm->time.tm_hour = now.tm_hour;
+
+ /* For simplicity, only support date rollover for now */
+ if (alarm->time.tm_mday == -1) {
+ alarm->time.tm_mday = now.tm_mday;
+ missing = day;
+ }
+ if (alarm->time.tm_mon == -1) {
+ alarm->time.tm_mon = now.tm_mon;
+ if (missing == none)
+ missing = month;
+ }
+ if (alarm->time.tm_year == -1) {
+ alarm->time.tm_year = now.tm_year;
+ if (missing == none)
+ missing = year;
+ }
+
+ /* with luck, no rollover is needed */
+ rtc_tm_to_time(&now, &t_now);
+ rtc_tm_to_time(&alarm->time, &t_alm);
+ if (t_now < t_alm)
+ goto done;
+
+ switch (missing) {
+
+ /* 24 hour rollover ... if it's now 10am Monday, an alarm
+ * that will trigger at 5am will do so at 5am Tuesday, which
+ * could also be in the next month or year. This is a common
+ * case, especially for PCs.
+ */
+ case day:
+ dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
+ t_alm += 24 * 60 * 60;
+ rtc_time_to_tm(t_alm, &alarm->time);
+ break;
+
+ /* Month rollover ... if it's the 31st, an alarm on the 3rd will
+ * be next month. An alarm matching on the 30th, 29th, or 28th
+ * may end up in the month after that! Many newer PCs support
+ * this type of alarm.
+ */
+ case month:
+ dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
+ do {
+ if (alarm->time.tm_mon < 11)
+ alarm->time.tm_mon++;
+ else {
+ alarm->time.tm_mon = 0;
+ alarm->time.tm_year++;
+ }
+ days = rtc_month_days(alarm->time.tm_mon,
+ alarm->time.tm_year);
+ } while (days < alarm->time.tm_mday);
+ break;
+
+ /* Year rollover ... easy except for leap years! */
+ case year:
+ dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
+ do {
+ alarm->time.tm_year++;
+ } while (rtc_valid_tm(&alarm->time) != 0);
+ break;
+
+ default:
+ dev_warn(&rtc->dev, "alarm rollover not handled\n");
+ }
+
+done:
+ return 0;
+}
+
int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
int err;
@@ -209,9 +389,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
}
if (err)
- return err;
-
- if (!rtc->ops)
+ /* nothing */;
+ else if (!rtc->ops)
err = -ENODEV;
else if (!rtc->ops->alarm_irq_enable)
err = -EINVAL;
@@ -229,6 +408,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
if (err)
return err;
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ if (enabled == 0 && rtc->uie_irq_active) {
+ mutex_unlock(&rtc->ops_lock);
+ return rtc_dev_update_irq_enable_emul(rtc, 0);
+ }
+#endif
/* make sure we're changing state */
if (rtc->uie_rtctimer.enabled == enabled)
goto out;
@@ -248,6 +433,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
out:
mutex_unlock(&rtc->ops_lock);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ /*
+ * Enable emulation if the driver did not provide
+ * the update_irq_enable function pointer or returned
+ * -EINVAL to signal that it was configured without
+ * interrupts or that they are not available at the moment.
+ */
+ if (err == -EINVAL)
+ err = rtc_dev_update_irq_enable_emul(rtc, enabled);
+#endif
return err;
}
@@ -263,7 +458,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
*
* Triggers the registered irq_task function callback.
*/
-static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
unsigned long flags;
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 26d1cf5..518a76e 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -183,33 +183,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
return 0;
}
-/*
- * Handle commands from user-space
- */
-static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
-{
- int ret = 0;
-
- pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg);
-
- /* important: scrub old status before enabling IRQs */
- switch (cmd) {
- case RTC_UIE_OFF: /* update off */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
- break;
- case RTC_UIE_ON: /* update on */
- at91_sys_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
- break;
- default:
- ret = -ENOIOCTLCMD;
- break;
- }
-
- return ret;
-}
-
static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
pr_debug("%s(): cmd=%08x\n", __func__, enabled);
@@ -269,7 +242,6 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
}
static const struct rtc_class_ops at91_rtc_ops = {
- .ioctl = at91_rtc_ioctl,
.read_time = at91_rtc_readtime,
.set_time = at91_rtc_settime,
.read_alarm = at91_rtc_readalarm,
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index c36749e..a3ad957 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -216,33 +216,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
return 0;
}
-/*
- * Handle commands from user-space
- */
-static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
-{
- struct sam9_rtc *rtc = dev_get_drvdata(dev);
- int ret = 0;
- u32 mr = rtt_readl(rtc, MR);
-
- dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
-
- switch (cmd) {
- case RTC_UIE_OFF: /* update off */
- rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
- break;
- case RTC_UIE_ON: /* update on */
- rtt_writel(rtc, MR, mr | AT91_RTT_RTTINCIEN);
- break;
- default:
- ret = -ENOIOCTLCMD;
- break;
- }
-
- return ret;
-}
-
static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
@@ -303,13 +276,12 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
}
static const struct rtc_class_ops at91_rtc_ops = {
- .ioctl = at91_rtc_ioctl,
.read_time = at91_rtc_readtime,
.set_time = at91_rtc_settime,
.read_alarm = at91_rtc_readalarm,
.set_alarm = at91_rtc_setalarm,
.proc = at91_rtc_proc,
- .alarm_irq_enabled = at91_rtc_alarm_irq_enable,
+ .alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
/*
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 17971d9..ca9cff8 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -240,32 +240,6 @@ static void bfin_rtc_int_set_alarm(struct bfin_rtc *rtc)
*/
bfin_rtc_int_set(rtc->rtc_alarm.tm_yday == -1 ? RTC_ISTAT_ALARM : RTC_ISTAT_ALARM_DAY);
}
-static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- struct bfin_rtc *rtc = dev_get_drvdata(dev);
- int ret = 0;
-
- dev_dbg_stamp(dev);
-
- bfin_rtc_sync_pending(dev);
-
- switch (cmd) {
- case RTC_UIE_ON:
- dev_dbg_stamp(dev);
- bfin_rtc_int_set(RTC_ISTAT_SEC);
- break;
- case RTC_UIE_OFF:
- dev_dbg_stamp(dev);
- bfin_rtc_int_clear(~RTC_ISTAT_SEC);
- break;
-
- default:
- dev_dbg_stamp(dev);
- ret = -ENOIOCTLCMD;
- }
-
- return ret;
-}
static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
@@ -358,7 +332,6 @@ static int bfin_rtc_proc(struct device *dev, struct seq_file *seq)
}
static struct rtc_class_ops bfin_rtc_ops = {
- .ioctl = bfin_rtc_ioctl,
.read_time = bfin_rtc_read_time,
.set_time = bfin_rtc_set_time,
.read_alarm = bfin_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c7ff8df..911e75c 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -37,6 +37,8 @@
#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -375,50 +377,6 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
-static int cmos_irq_set_freq(struct device *dev, int freq)
-{
- struct cmos_rtc *cmos = dev_get_drvdata(dev);
- int f;
- unsigned long flags;
-
- if (!is_valid_irq(cmos->irq))
- return -ENXIO;
-
- if (!is_power_of_2(freq))
- return -EINVAL;
- /* 0 = no irqs; 1 = 2^15 Hz ... 15 = 2^0 Hz */
- f = ffs(freq);
- if (f-- > 16)
- return -EINVAL;
- f = 16 - f;
-
- spin_lock_irqsave(&rtc_lock, flags);
- hpet_set_periodic_freq(freq);
- CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT);
- spin_unlock_irqrestore(&rtc_lock, flags);
-
- return 0;
-}
-
-static int cmos_irq_set_state(struct device *dev, int enabled)
-{
- struct cmos_rtc *cmos = dev_get_drvdata(dev);
- unsigned long flags;
-
- if (!is_valid_irq(cmos->irq))
- return -ENXIO;
-
- spin_lock_irqsave(&rtc_lock, flags);
-
- if (enabled)
- cmos_irq_enable(cmos, RTC_PIE);
- else
- cmos_irq_disable(cmos, RTC_PIE);
-
- spin_unlock_irqrestore(&rtc_lock, flags);
- return 0;
-}
-
static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -438,25 +396,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int cmos_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct cmos_rtc *cmos = dev_get_drvdata(dev);
- unsigned long flags;
-
- if (!is_valid_irq(cmos->irq))
- return -EINVAL;
-
- spin_lock_irqsave(&rtc_lock, flags);
-
- if (enabled)
- cmos_irq_enable(cmos, RTC_UIE);
- else
- cmos_irq_disable(cmos, RTC_UIE);
-
- spin_unlock_irqrestore(&rtc_lock, flags);
- return 0;
-}
-
#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
static int cmos_procfs(struct device *dev, struct seq_file *seq)
@@ -501,10 +440,7 @@ static const struct rtc_class_ops cmos_rtc_ops = {
.read_alarm = cmos_read_alarm,
.set_alarm = cmos_set_alarm,
.proc = cmos_procfs,
- .irq_set_freq = cmos_irq_set_freq,
- .irq_set_state = cmos_irq_set_state,
.alarm_irq_enable = cmos_alarm_irq_enable,
- .update_irq_enable = cmos_update_irq_enable,
};
/*----------------------------------------------------------------*/
@@ -1123,6 +1059,47 @@ static struct pnp_driver cmos_pnp_driver = {
#endif /* CONFIG_PNP */
+#ifdef CONFIG_OF
+static const struct of_device_id of_cmos_match[] = {
+ {
+ .compatible = "motorola,mc146818",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_cmos_match);
+
+static __init void cmos_of_init(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct rtc_time time;
+ int ret;
+ const __be32 *val;
+
+ if (!node)
+ return;
+
+ val = of_get_property(node, "ctrl-reg", NULL);
+ if (val)
+ CMOS_WRITE(be32_to_cpup(val), RTC_CONTROL);
+
+ val = of_get_property(node, "freq-reg", NULL);
+ if (val)
+ CMOS_WRITE(be32_to_cpup(val), RTC_FREQ_SELECT);
+
+ get_rtc_time(&time);
+ ret = rtc_valid_tm(&time);
+ if (ret) {
+ struct rtc_time def_time = {
+ .tm_year = 1,
+ .tm_mday = 1,
+ };
+ set_rtc_time(&def_time);
+ }
+}
+#else
+static inline void cmos_of_init(struct platform_device *pdev) {}
+#define of_cmos_match NULL
+#endif
/*----------------------------------------------------------------*/
/* Platform setup should have set up an RTC device, when PNP is
@@ -1131,6 +1108,7 @@ static struct pnp_driver cmos_pnp_driver = {
static int __init cmos_platform_probe(struct platform_device *pdev)
{
+ cmos_of_init(pdev);
cmos_wake_setup(&pdev->dev);
return cmos_do_probe(&pdev->dev,
platform_get_resource(pdev, IORESOURCE_IO, 0),
@@ -1162,6 +1140,7 @@ static struct platform_driver cmos_platform_driver = {
#ifdef CONFIG_PM
.pm = &cmos_pm_ops,
#endif
+ .of_match_table = of_cmos_match,
}
};
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 34647fc..8d46838 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -231,10 +231,6 @@ davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
case RTC_WIE_OFF:
rtc_ctrl &= ~PRTCSS_RTC_CTRL_WEN;
break;
- case RTC_UIE_OFF:
- case RTC_UIE_ON:
- ret = -ENOTTY;
- break;
default:
ret = -ENOIOCTLCMD;
}
@@ -473,55 +469,6 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
return 0;
}
-static int davinci_rtc_irq_set_state(struct device *dev, int enabled)
-{
- struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
- unsigned long flags;
- u8 rtc_ctrl;
-
- spin_lock_irqsave(&davinci_rtc_lock, flags);
-
- rtc_ctrl = rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL);
-
- if (enabled) {
- while (rtcss_read(davinci_rtc, PRTCSS_RTC_CTRL)
- & PRTCSS_RTC_CTRL_WDTBUS)
- cpu_relax();
-
- rtc_ctrl |= PRTCSS_RTC_CTRL_TE;
- rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
-
- rtcss_write(davinci_rtc, 0x0, PRTCSS_RTC_CLKC_CNT);
-
- rtc_ctrl |= PRTCSS_RTC_CTRL_TIEN |
- PRTCSS_RTC_CTRL_TMMD |
- PRTCSS_RTC_CTRL_TMRFLG;
- } else
- rtc_ctrl &= ~PRTCSS_RTC_CTRL_TIEN;
-
- rtcss_write(davinci_rtc, rtc_ctrl, PRTCSS_RTC_CTRL);
-
- spin_unlock_irqrestore(&davinci_rtc_lock, flags);
-
- return 0;
-}
-
-static int davinci_rtc_irq_set_freq(struct device *dev, int freq)
-{
- struct davinci_rtc *davinci_rtc = dev_get_drvdata(dev);
- unsigned long flags;
- u16 tmr_counter = (0x8000 >> (ffs(freq) - 1));
-
- spin_lock_irqsave(&davinci_rtc_lock, flags);
-
- rtcss_write(davinci_rtc, tmr_counter & 0xFF, PRTCSS_RTC_TMR0);
- rtcss_write(davinci_rtc, (tmr_counter & 0xFF00) >> 8, PRTCSS_RTC_TMR1);
-
- spin_unlock_irqrestore(&davinci_rtc_lock, flags);
-
- return 0;
-}
-
static struct rtc_class_ops davinci_rtc_ops = {
.ioctl = davinci_rtc_ioctl,
.read_time = davinci_rtc_read_time,
@@ -529,8 +476,6 @@ static struct rtc_class_ops davinci_rtc_ops = {
.alarm_irq_enable = davinci_rtc_alarm_irq_enable,
.read_alarm = davinci_rtc_read_alarm,
.set_alarm = davinci_rtc_set_alarm,
- .irq_set_state = davinci_rtc_irq_set_state,
- .irq_set_freq = davinci_rtc_irq_set_freq,
};
static int __init davinci_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 37c3cc1..d0e06ed 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
return err;
}
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+/*
+ * Routine to poll the RTC seconds field for a change as often as possible;
+ * after the first RTC_UIE event, a timer is used to reduce the polling.
+ */
+static void rtc_uie_task(struct work_struct *work)
+{
+ struct rtc_device *rtc =
+ container_of(work, struct rtc_device, uie_task);
+ struct rtc_time tm;
+ int num = 0;
+ int err;
+
+ err = rtc_read_time(rtc, &tm);
+
+ spin_lock_irq(&rtc->irq_lock);
+ if (rtc->stop_uie_polling || err) {
+ rtc->uie_task_active = 0;
+ } else if (rtc->oldsecs != tm.tm_sec) {
+ num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
+ rtc->oldsecs = tm.tm_sec;
+ rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
+ rtc->uie_timer_active = 1;
+ rtc->uie_task_active = 0;
+ add_timer(&rtc->uie_timer);
+ } else if (schedule_work(&rtc->uie_task) == 0) {
+ rtc->uie_task_active = 0;
+ }
+ spin_unlock_irq(&rtc->irq_lock);
+ if (num)
+ rtc_handle_legacy_irq(rtc, num, RTC_UF);
+}
+static void rtc_uie_timer(unsigned long data)
+{
+ struct rtc_device *rtc = (struct rtc_device *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc->irq_lock, flags);
+ rtc->uie_timer_active = 0;
+ rtc->uie_task_active = 1;
+ if ((schedule_work(&rtc->uie_task) == 0))
+ rtc->uie_task_active = 0;
+ spin_unlock_irqrestore(&rtc->irq_lock, flags);
+}
+
+static int clear_uie(struct rtc_device *rtc)
+{
+ spin_lock_irq(&rtc->irq_lock);
+ if (rtc->uie_irq_active) {
+ rtc->stop_uie_polling = 1;
+ if (rtc->uie_timer_active) {
+ spin_unlock_irq(&rtc->irq_lock);
+ del_timer_sync(&rtc->uie_timer);
+ spin_lock_irq(&rtc->irq_lock);
+ rtc->uie_timer_active = 0;
+ }
+ if (rtc->uie_task_active) {
+ spin_unlock_irq(&rtc->irq_lock);
+ flush_scheduled_work();
+ spin_lock_irq(&rtc->irq_lock);
+ }
+ rtc->uie_irq_active = 0;
+ }
+ spin_unlock_irq(&rtc->irq_lock);
+ return 0;
+}
+
+static int set_uie(struct rtc_device *rtc)
+{
+ struct rtc_time tm;
+ int err;
+
+ err = rtc_read_time(rtc, &tm);
+ if (err)
+ return err;
+ spin_lock_irq(&rtc->irq_lock);
+ if (!rtc->uie_irq_active) {
+ rtc->uie_irq_active = 1;
+ rtc->stop_uie_polling = 0;
+ rtc->oldsecs = tm.tm_sec;
+ rtc->uie_task_active = 1;
+ if (schedule_work(&rtc->uie_task) == 0)
+ rtc->uie_task_active = 0;
+ }
+ rtc->irq_data = 0;
+ spin_unlock_irq(&rtc->irq_lock);
+ return 0;
+}
+
+int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
+{
+ if (enabled)
+ return set_uie(rtc);
+ else
+ return clear_uie(rtc);
+}
+EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
+
+#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -387,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc)
rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+ INIT_WORK(&rtc->uie_task, rtc_uie_task);
+ setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+#endif
+
cdev_init(&rtc->char_dev, &rtc_dev_fops);
rtc->char_dev.owner = rtc->owner;
}
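
The emulation added here watches the seconds field and, once a change is seen, falls back to a timer that fires just before the next expected tick. A rough userspace analogue of that polling/timer pattern, using only standard C/POSIX time calls (this is not the kernel code path, just the idea):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for rtc_read_time(): read the current seconds value. */
static int read_seconds(void)
{
	time_t now = time(NULL);
	struct tm tm;

	localtime_r(&now, &tm);
	return tm.tm_sec;
}

int main(void)
{
	int oldsecs = read_seconds();
	int events = 0;

	while (events < 3) {
		int secs = read_seconds();

		if (secs == oldsecs)
			continue;	/* poll as often as possible, like the re-queued work item */

		events += (secs + 60 - oldsecs) % 60;
		oldsecs = secs;
		printf("update event (total %d)\n", events);
		usleep(900000);		/* roughly HZ - HZ/10: wait until just before the next tick */
	}
	return 0;
}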
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 37268e9..3fffd70 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -397,29 +397,12 @@ static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int ds1511_rtc_update_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
- if (pdata->irq <= 0)
- return -EINVAL;
- if (enabled)
- pdata->irqen |= RTC_UF;
- else
- pdata->irqen &= ~RTC_UF;
- ds1511_rtc_update_alarm(pdata);
- return 0;
-}
-
static const struct rtc_class_ops ds1511_rtc_ops = {
.read_time = ds1511_rtc_read_time,
.set_time = ds1511_rtc_set_time,
.read_alarm = ds1511_rtc_read_alarm,
.set_alarm = ds1511_rtc_set_alarm,
.alarm_irq_enable = ds1511_rtc_alarm_irq_enable,
- .update_irq_enable = ds1511_rtc_update_irq_enable,
};
static ssize_t
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index ff432e2..fee41b9 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -227,29 +227,12 @@ static int ds1553_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int ds1553_rtc_update_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
-
- if (pdata->irq <= 0)
- return -EINVAL;
- if (enabled)
- pdata->irqen |= RTC_UF;
- else
- pdata->irqen &= ~RTC_UF;
- ds1553_rtc_update_alarm(pdata);
- return 0;
-}
-
static const struct rtc_class_ops ds1553_rtc_ops = {
.read_time = ds1553_rtc_read_time,
.set_time = ds1553_rtc_set_time,
.read_alarm = ds1553_rtc_read_alarm,
.set_alarm = ds1553_rtc_set_alarm,
.alarm_irq_enable = ds1553_rtc_alarm_irq_enable,
- .update_irq_enable = ds1553_rtc_update_irq_enable,
};
static ssize_t ds1553_nvram_read(struct file *filp, struct kobject *kobj,
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee1..27b7bf6 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
/*
* RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
*
- * Copyright (C) 2009-2010 Freescale Semiconductor.
+ * Copyright (C) 2009-2011 Freescale Semiconductor.
* Author: Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
time->tm_hour = bcd2bin(hour);
}
- time->tm_wday = bcd2bin(week);
+	/* Day of the week: Linux uses 0~6, while the RTC chip uses 1~7 */
+ time->tm_wday = bcd2bin(week) - 1;
time->tm_mday = bcd2bin(day);
- time->tm_mon = bcd2bin(month & 0x7F);
+	/* Linux tm_mon ranges 0~11, while the RTC chip month ranges 1~12 */
+ time->tm_mon = bcd2bin(month & 0x7F) - 1;
if (century)
add_century = 100;
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
buf[0] = bin2bcd(time->tm_sec);
buf[1] = bin2bcd(time->tm_min);
buf[2] = bin2bcd(time->tm_hour);
- buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
+	/* Day of the week: Linux uses 0~6, while the RTC chip uses 1~7 */
+ buf[3] = bin2bcd(time->tm_wday + 1);
buf[4] = bin2bcd(time->tm_mday); /* Date */
- buf[5] = bin2bcd(time->tm_mon);
+	/* Linux tm_mon ranges 0~11, while the RTC chip month ranges 1~12 */
+ buf[5] = bin2bcd(time->tm_mon + 1);
if (time->tm_year >= 100) {
buf[5] |= 0x80;
buf[6] = bin2bcd(time->tm_year - 100);
@@ -335,23 +339,6 @@ static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int ds3232_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct ds3232 *ds3232 = i2c_get_clientdata(client);
-
- if (client->irq <= 0)
- return -EINVAL;
-
- if (enabled)
- ds3232->rtc->irq_data |= RTC_UF;
- else
- ds3232->rtc->irq_data &= ~RTC_UF;
-
- ds3232_update_alarm(client);
- return 0;
-}
-
static irqreturn_t ds3232_irq(int irq, void *dev_id)
{
struct i2c_client *client = dev_id;
@@ -402,7 +389,6 @@ static const struct rtc_class_ops ds3232_rtc_ops = {
.read_alarm = ds3232_read_alarm,
.set_alarm = ds3232_set_alarm,
.alarm_irq_enable = ds3232_alarm_irq_enable,
- .update_irq_enable = ds3232_update_irq_enable,
};
static int __devinit ds3232_probe(struct i2c_client *client,
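
The wday/month fixes above are plain off-by-one conversions between the Linux tm ranges and the chip's 1-based BCD registers. A small standalone check of the round trip; bin2bcd()/bcd2bin() are reimplemented here for illustration rather than taken from the kernel headers:

#include <stdio.h>

static unsigned char bin2bcd(unsigned char val)
{
	return ((val / 10) << 4) | (val % 10);
}

static unsigned char bcd2bin(unsigned char val)
{
	return ((val >> 4) * 10) + (val & 0x0f);
}

int main(void)
{
	/* Linux: tm_mon 0..11, tm_wday 0..6; DS3232 registers: month 1..12, day 1..7. */
	int tm_mon = 11, tm_wday = 0;		/* December, Sunday */
	unsigned char month_reg = bin2bcd(tm_mon + 1);
	unsigned char wday_reg = bin2bcd(tm_wday + 1);

	printf("month reg 0x%02x, wday reg 0x%02x\n", month_reg, wday_reg);
	printf("back to Linux: tm_mon %d, tm_wday %d\n",
	       bcd2bin(month_reg) - 1, bcd2bin(wday_reg) - 1);
	return 0;
}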
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 2e16f72..b647363 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -168,12 +168,6 @@ static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
-static int jz4740_rtc_update_irq_enable(struct device *dev, unsigned int enable)
-{
- struct jz4740_rtc *rtc = dev_get_drvdata(dev);
- return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ_IRQ, enable);
-}
-
static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
{
struct jz4740_rtc *rtc = dev_get_drvdata(dev);
@@ -185,7 +179,6 @@ static struct rtc_class_ops jz4740_rtc_ops = {
.set_mmss = jz4740_rtc_set_mmss,
.read_alarm = jz4740_rtc_read_alarm,
.set_alarm = jz4740_rtc_set_alarm,
- .update_irq_enable = jz4740_rtc_update_irq_enable,
.alarm_irq_enable = jz4740_rtc_alarm_irq_enable,
};
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 5314b15..c420064 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -282,12 +282,6 @@ static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev)
return IRQ_HANDLED;
}
-static int mc13xxx_rtc_update_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- return mc13xxx_rtc_irq_enable(dev, enabled, MC13XXX_IRQ_1HZ);
-}
-
static int mc13xxx_rtc_alarm_irq_enable(struct device *dev,
unsigned int enabled)
{
@@ -300,7 +294,6 @@ static const struct rtc_class_ops mc13xxx_rtc_ops = {
.read_alarm = mc13xxx_rtc_read_alarm,
.set_alarm = mc13xxx_rtc_set_alarm,
.alarm_irq_enable = mc13xxx_rtc_alarm_irq_enable,
- .update_irq_enable = mc13xxx_rtc_update_irq_enable,
};
static irqreturn_t mc13xxx_rtc_reset_handler(int irq, void *dev)
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index dfcdf09..b40c1ff 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -240,32 +240,12 @@ static int mpc5121_rtc_alarm_irq_enable(struct device *dev,
return 0;
}
-static int mpc5121_rtc_update_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
- struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
- int val;
-
- val = in_8(&regs->int_enable);
-
- if (enabled)
- val = (val & ~0x8) | 0x1;
- else
- val &= ~0x1;
-
- out_8(&regs->int_enable, val);
-
- return 0;
-}
-
static const struct rtc_class_ops mpc5121_rtc_ops = {
.read_time = mpc5121_rtc_read_time,
.set_time = mpc5121_rtc_set_time,
.read_alarm = mpc5121_rtc_read_alarm,
.set_alarm = mpc5121_rtc_set_alarm,
.alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
- .update_irq_enable = mpc5121_rtc_update_irq_enable,
};
static int __devinit mpc5121_rtc_probe(struct platform_device *op,
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 1db62db..b86bc32 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -62,6 +62,17 @@ static inline int is_intr(u8 rtc_intr)
return rtc_intr & RTC_IRQMASK;
}
+static inline unsigned char vrtc_is_updating(void)
+{
+ unsigned char uip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ uip = (vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return uip;
+}
+
/*
* rtc_time's year contains the increment over 1900, but vRTC's YEAR
* register can't be programmed to value larger than 0x64, so vRTC
@@ -76,7 +87,7 @@ static int mrst_read_time(struct device *dev, struct rtc_time *time)
{
unsigned long flags;
- if (rtc_is_updating())
+ if (vrtc_is_updating())
mdelay(20);
spin_lock_irqsave(&rtc_lock, flags);
@@ -236,25 +247,6 @@ static int mrst_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
-static int mrst_irq_set_state(struct device *dev, int enabled)
-{
- struct mrst_rtc *mrst = dev_get_drvdata(dev);
- unsigned long flags;
-
- if (!mrst->irq)
- return -ENXIO;
-
- spin_lock_irqsave(&rtc_lock, flags);
-
- if (enabled)
- mrst_irq_enable(mrst, RTC_PIE);
- else
- mrst_irq_disable(mrst, RTC_PIE);
-
- spin_unlock_irqrestore(&rtc_lock, flags);
- return 0;
-}
-
/* Currently, the vRTC doesn't support UIE ON/OFF */
static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
@@ -301,7 +293,6 @@ static const struct rtc_class_ops mrst_rtc_ops = {
.read_alarm = mrst_read_alarm,
.set_alarm = mrst_set_alarm,
.proc = mrst_procfs,
- .irq_set_state = mrst_irq_set_state,
.alarm_irq_enable = mrst_rtc_alarm_irq_enable,
};
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 0b06c1e..826ab64 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -274,12 +274,6 @@ static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int mxc_rtc_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- mxc_rtc_irq_enable(dev, RTC_1HZ_BIT, enabled);
- return 0;
-}
-
/*
* This function reads the current RTC time into tm in Gregorian date.
*/
@@ -368,7 +362,6 @@ static struct rtc_class_ops mxc_rtc_ops = {
.read_alarm = mxc_rtc_read_alarm,
.set_alarm = mxc_rtc_set_alarm,
.alarm_irq_enable = mxc_rtc_alarm_irq_enable,
- .update_irq_enable = mxc_rtc_update_irq_enable,
};
static int __init mxc_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index ddb0857..781068d 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -134,20 +134,6 @@ static void nuc900_rtc_bin2bcd(struct device *dev, struct rtc_time *settm,
gettm->bcd_hour = bin2bcd(settm->tm_hour) << 16;
}
-static int nuc900_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct nuc900_rtc *rtc = dev_get_drvdata(dev);
-
- if (enabled)
- __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)|
- (TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
- else
- __raw_writel(__raw_readl(rtc->rtc_reg + REG_RTC_RIER)&
- (~TICKINTENB), rtc->rtc_reg + REG_RTC_RIER);
-
- return 0;
-}
-
static int nuc900_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct nuc900_rtc *rtc = dev_get_drvdata(dev);
@@ -234,7 +220,6 @@ static struct rtc_class_ops nuc900_rtc_ops = {
.read_alarm = nuc900_rtc_read_alarm,
.set_alarm = nuc900_rtc_set_alarm,
.alarm_irq_enable = nuc900_alarm_irq_enable,
- .update_irq_enable = nuc900_update_irq_enable,
};
static int __devinit nuc900_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index b4dbf3a3..de0dd7b 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -135,44 +135,6 @@ static irqreturn_t rtc_irq(int irq, void *rtc)
return IRQ_HANDLED;
}
-#ifdef CONFIG_RTC_INTF_DEV
-
-static int
-omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- u8 reg;
-
- switch (cmd) {
- case RTC_UIE_OFF:
- case RTC_UIE_ON:
- break;
- default:
- return -ENOIOCTLCMD;
- }
-
- local_irq_disable();
- rtc_wait_not_busy();
- reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
- switch (cmd) {
- /* UIE = Update Interrupt Enable (1/second) */
- case RTC_UIE_OFF:
- reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
- break;
- case RTC_UIE_ON:
- reg |= OMAP_RTC_INTERRUPTS_IT_TIMER;
- break;
- }
- rtc_wait_not_busy();
- rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
- local_irq_enable();
-
- return 0;
-}
-
-#else
-#define omap_rtc_ioctl NULL
-#endif
-
static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
u8 reg;
@@ -313,7 +275,6 @@ static int omap_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
}
static struct rtc_class_ops omap_rtc_ops = {
- .ioctl = omap_rtc_ioctl,
.read_time = omap_rtc_read_time,
.set_time = omap_rtc_set_time,
.read_alarm = omap_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index 25c0b3f..a633abc 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -131,18 +131,12 @@ static int pcap_rtc_alarm_irq_enable(struct device *dev, unsigned int en)
return pcap_rtc_irq_enable(dev, PCAP_IRQ_TODA, en);
}
-static int pcap_rtc_update_irq_enable(struct device *dev, unsigned int en)
-{
- return pcap_rtc_irq_enable(dev, PCAP_IRQ_1HZ, en);
-}
-
static const struct rtc_class_ops pcap_rtc_ops = {
.read_time = pcap_rtc_read_time,
.read_alarm = pcap_rtc_read_alarm,
.set_alarm = pcap_rtc_set_alarm,
.set_mmss = pcap_rtc_set_mmss,
.alarm_irq_enable = pcap_rtc_alarm_irq_enable,
- .update_irq_enable = pcap_rtc_update_irq_enable,
};
static int __devinit pcap_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 16edf94..f90c574 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -106,25 +106,6 @@ pcf50633_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int
-pcf50633_rtc_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct pcf50633_rtc *rtc = dev_get_drvdata(dev);
- int err;
-
- if (enabled)
- err = pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_SECOND);
- else
- err = pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_SECOND);
-
- if (err < 0)
- return err;
-
- rtc->second_enabled = enabled;
-
- return 0;
-}
-
static int pcf50633_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pcf50633_rtc *rtc;
@@ -262,8 +243,7 @@ static struct rtc_class_ops pcf50633_rtc_ops = {
.set_time = pcf50633_rtc_set_time,
.read_alarm = pcf50633_rtc_read_alarm,
.set_alarm = pcf50633_rtc_set_alarm,
- .alarm_irq_enable = pcf50633_rtc_alarm_irq_enable,
- .update_irq_enable = pcf50633_rtc_update_irq_enable,
+ .alarm_irq_enable = pcf50633_rtc_alarm_irq_enable,
};
static void pcf50633_rtc_irq(int irq, void *data)
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index bbdb2f0..d554368 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -35,11 +35,6 @@ static irqreturn_t pl030_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int pl030_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- return -ENOIOCTLCMD;
-}
-
static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct pl030_rtc *rtc = dev_get_drvdata(dev);
@@ -96,7 +91,6 @@ static int pl030_set_time(struct device *dev, struct rtc_time *tm)
}
static const struct rtc_class_ops pl030_ops = {
- .ioctl = pl030_ioctl,
.read_time = pl030_read_time,
.set_time = pl030_set_time,
.read_alarm = pl030_read_alarm,
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index b7a6690..d829ea6 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -293,57 +293,6 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
return ret;
}
-/* Periodic interrupt is only available in ST variants. */
-static int pl031_irq_set_state(struct device *dev, int enabled)
-{
- struct pl031_local *ldata = dev_get_drvdata(dev);
-
- if (enabled == 1) {
- /* Clear any pending timer interrupt. */
- writel(RTC_BIT_PI, ldata->base + RTC_ICR);
-
- writel(readl(ldata->base + RTC_IMSC) | RTC_BIT_PI,
- ldata->base + RTC_IMSC);
-
- /* Now start the timer */
- writel(readl(ldata->base + RTC_TCR) | RTC_TCR_EN,
- ldata->base + RTC_TCR);
-
- } else {
- writel(readl(ldata->base + RTC_IMSC) & (~RTC_BIT_PI),
- ldata->base + RTC_IMSC);
-
- /* Also stop the timer */
- writel(readl(ldata->base + RTC_TCR) & (~RTC_TCR_EN),
- ldata->base + RTC_TCR);
- }
- /* Wait at least 1 RTC32 clock cycle to ensure next access
- * to RTC_TCR will succeed.
- */
- udelay(40);
-
- return 0;
-}
-
-static int pl031_irq_set_freq(struct device *dev, int freq)
-{
- struct pl031_local *ldata = dev_get_drvdata(dev);
-
- /* Cant set timer if it is already enabled */
- if (readl(ldata->base + RTC_TCR) & RTC_TCR_EN) {
- dev_err(dev, "can't change frequency while timer enabled\n");
- return -EINVAL;
- }
-
- /* If self start bit in RTC_TCR is set timer will start here,
- * but we never set that bit. Instead we start the timer when
- * set_state is called with enabled == 1.
- */
- writel(RTC_TIMER_FREQ / freq, ldata->base + RTC_TLR);
-
- return 0;
-}
-
static int pl031_remove(struct amba_device *adev)
{
struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
@@ -440,8 +389,6 @@ static struct rtc_class_ops stv1_pl031_ops = {
.read_alarm = pl031_read_alarm,
.set_alarm = pl031_set_alarm,
.alarm_irq_enable = pl031_alarm_irq_enable,
- .irq_set_state = pl031_irq_set_state,
- .irq_set_freq = pl031_irq_set_freq,
};
/* And the second ST derivative */
@@ -451,8 +398,6 @@ static struct rtc_class_ops stv2_pl031_ops = {
.read_alarm = pl031_stv2_read_alarm,
.set_alarm = pl031_stv2_set_alarm,
.alarm_irq_enable = pl031_alarm_irq_enable,
- .irq_set_state = pl031_irq_set_state,
- .irq_set_freq = pl031_irq_set_freq,
};
static struct amba_id pl031_ids[] = {
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index 242bbf8..0a59fda 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -69,6 +69,14 @@ static int rtc_proc_show(struct seq_file *seq, void *offset)
alrm.enabled ? "yes" : "no");
seq_printf(seq, "alrm_pending\t: %s\n",
alrm.pending ? "yes" : "no");
+ seq_printf(seq, "update IRQ enabled\t: %s\n",
+ (rtc->uie_rtctimer.enabled) ? "yes" : "no");
+ seq_printf(seq, "periodic IRQ enabled\t: %s\n",
+ (rtc->pie_enabled) ? "yes" : "no");
+ seq_printf(seq, "periodic IRQ frequency\t: %d\n",
+ rtc->irq_freq);
+ seq_printf(seq, "max user IRQ frequency\t: %d\n",
+ rtc->max_user_freq);
}
seq_printf(seq, "24hr\t\t: yes\n");
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index 29e867a..fc9f499 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -209,32 +209,6 @@ static void pxa_rtc_release(struct device *dev)
free_irq(pxa_rtc->irq_1Hz, dev);
}
-static int pxa_periodic_irq_set_freq(struct device *dev, int freq)
-{
- struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
- int period_ms;
-
- if (freq < 1 || freq > MAXFREQ_PERIODIC)
- return -EINVAL;
-
- period_ms = 1000 / freq;
- rtc_writel(pxa_rtc, PIAR, period_ms);
-
- return 0;
-}
-
-static int pxa_periodic_irq_set_state(struct device *dev, int enabled)
-{
- struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
-
- if (enabled)
- rtsr_set_bits(pxa_rtc, RTSR_PIALE | RTSR_PICE);
- else
- rtsr_clear_bits(pxa_rtc, RTSR_PIALE | RTSR_PICE);
-
- return 0;
-}
-
static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
@@ -250,21 +224,6 @@ static int pxa_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int pxa_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
-
- spin_lock_irq(&pxa_rtc->lock);
-
- if (enabled)
- rtsr_set_bits(pxa_rtc, RTSR_HZE);
- else
- rtsr_clear_bits(pxa_rtc, RTSR_HZE);
-
- spin_unlock_irq(&pxa_rtc->lock);
- return 0;
-}
-
static int pxa_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
@@ -346,10 +305,7 @@ static const struct rtc_class_ops pxa_rtc_ops = {
.read_alarm = pxa_rtc_read_alarm,
.set_alarm = pxa_rtc_set_alarm,
.alarm_irq_enable = pxa_alarm_irq_enable,
- .update_irq_enable = pxa_update_irq_enable,
.proc = pxa_rtc_proc,
- .irq_set_state = pxa_periodic_irq_set_state,
- .irq_set_freq = pxa_periodic_irq_set_freq,
};
static int __init pxa_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 6aaa155..85c1b84 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -281,57 +281,6 @@ static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm)
return rs5c372_set_datetime(to_i2c_client(dev), tm);
}
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
-static int
-rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct rs5c372 *rs5c = i2c_get_clientdata(client);
- unsigned char buf;
- int status, addr;
-
- buf = rs5c->regs[RS5C_REG_CTRL1];
- switch (cmd) {
- case RTC_UIE_OFF:
- case RTC_UIE_ON:
- /* some 327a modes use a different IRQ pin for 1Hz irqs */
- if (rs5c->type == rtc_rs5c372a
- && (buf & RS5C372A_CTRL1_SL1))
- return -ENOIOCTLCMD;
- default:
- return -ENOIOCTLCMD;
- }
-
- status = rs5c_get_regs(rs5c);
- if (status < 0)
- return status;
-
- addr = RS5C_ADDR(RS5C_REG_CTRL1);
- switch (cmd) {
- case RTC_UIE_OFF: /* update off */
- buf &= ~RS5C_CTRL1_CT_MASK;
- break;
- case RTC_UIE_ON: /* update on */
- buf &= ~RS5C_CTRL1_CT_MASK;
- buf |= RS5C_CTRL1_CT4;
- break;
- }
-
- if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
- printk(KERN_WARNING "%s: can't update alarm\n",
- rs5c->rtc->name);
- status = -EIO;
- } else
- rs5c->regs[RS5C_REG_CTRL1] = buf;
-
- return status;
-}
-
-#else
-#define rs5c_rtc_ioctl NULL
-#endif
-
static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
@@ -480,7 +429,6 @@ static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
static const struct rtc_class_ops rs5c372_rtc_ops = {
.proc = rs5c372_rtc_proc,
- .ioctl = rs5c_rtc_ioctl,
.read_time = rs5c372_rtc_read_time,
.set_time = rs5c372_rtc_set_time,
.read_alarm = rs5c_read_alarm,
diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
index af32a62..fde172f 100644
--- a/drivers/rtc/rtc-rx8025.c
+++ b/drivers/rtc/rtc-rx8025.c
@@ -424,37 +424,12 @@ static int rx8025_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int rx8025_irq_set_state(struct device *dev, int enabled)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct rx8025_data *rx8025 = i2c_get_clientdata(client);
- int ctrl1;
- int err;
-
- if (client->irq <= 0)
- return -ENXIO;
-
- ctrl1 = rx8025->ctrl1 & ~RX8025_BIT_CTRL1_CT;
- if (enabled)
- ctrl1 |= RX8025_BIT_CTRL1_CT_1HZ;
- if (ctrl1 != rx8025->ctrl1) {
- rx8025->ctrl1 = ctrl1;
- err = rx8025_write_reg(rx8025->client, RX8025_REG_CTRL1,
- rx8025->ctrl1);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static struct rtc_class_ops rx8025_rtc_ops = {
.read_time = rx8025_get_time,
.set_time = rx8025_set_time,
.read_alarm = rx8025_read_alarm,
.set_alarm = rx8025_set_alarm,
.alarm_irq_enable = rx8025_alarm_irq_enable,
- .irq_set_state = rx8025_irq_set_state,
};
/*
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index cf953ec..7149649 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -77,47 +77,18 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
}
/* Update control registers */
-static void s3c_rtc_setaie(int to)
+static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
{
unsigned int tmp;
- pr_debug("%s: aie=%d\n", __func__, to);
+ pr_debug("%s: aie=%d\n", __func__, enabled);
tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
- if (to)
+ if (enabled)
tmp |= S3C2410_RTCALM_ALMEN;
writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
-}
-
-static int s3c_rtc_setpie(struct device *dev, int enabled)
-{
- unsigned int tmp;
-
- pr_debug("%s: pie=%d\n", __func__, enabled);
-
- spin_lock_irq(&s3c_rtc_pie_lock);
-
- if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
- tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
- tmp &= ~S3C64XX_RTCCON_TICEN;
-
- if (enabled)
- tmp |= S3C64XX_RTCCON_TICEN;
-
- writew(tmp, s3c_rtc_base + S3C2410_RTCCON);
- } else {
- tmp = readb(s3c_rtc_base + S3C2410_TICNT);
- tmp &= ~S3C2410_TICNT_ENABLE;
-
- if (enabled)
- tmp |= S3C2410_TICNT_ENABLE;
-
- writeb(tmp, s3c_rtc_base + S3C2410_TICNT);
- }
-
- spin_unlock_irq(&s3c_rtc_pie_lock);
return 0;
}
@@ -308,7 +279,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
writeb(alrm_en, base + S3C2410_RTCALM);
- s3c_rtc_setaie(alrm->enabled);
+ s3c_rtc_setaie(dev, alrm->enabled);
return 0;
}
@@ -377,8 +348,6 @@ static const struct rtc_class_ops s3c_rtcops = {
.set_time = s3c_rtc_settime,
.read_alarm = s3c_rtc_getalarm,
.set_alarm = s3c_rtc_setalarm,
- .irq_set_freq = s3c_rtc_setfreq,
- .irq_set_state = s3c_rtc_setpie,
.proc = s3c_rtc_proc,
.alarm_irq_enable = s3c_rtc_setaie,
};
@@ -440,7 +409,7 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
rtc_device_unregister(rtc);
s3c_rtc_setpie(&dev->dev, 0);
- s3c_rtc_setaie(0);
+ s3c_rtc_setaie(&dev->dev, 0);
clk_disable(rtc_clk);
clk_put(rtc_clk);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 5dfe5ff..0b40bb8 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -43,7 +43,6 @@
#define RTC_DEF_TRIM 0
static const unsigned long RTC_FREQ = 1024;
-static unsigned long timer_freq;
static struct rtc_time rtc_alarm;
static DEFINE_SPINLOCK(sa1100_rtc_lock);
@@ -156,114 +155,11 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int sa1100_irq_set_freq(struct device *dev, int freq)
-{
- if (freq < 1 || freq > timer_freq) {
- return -EINVAL;
- } else {
- struct rtc_device *rtc = (struct rtc_device *)dev;
-
- rtc->irq_freq = freq;
-
- return 0;
- }
-}
-
-static int rtc_timer1_count;
-
-static int sa1100_irq_set_state(struct device *dev, int enabled)
-{
- spin_lock_irq(&sa1100_rtc_lock);
- if (enabled) {
- struct rtc_device *rtc = (struct rtc_device *)dev;
-
- OSMR1 = timer_freq / rtc->irq_freq + OSCR;
- OIER |= OIER_E1;
- rtc_timer1_count = 1;
- } else {
- OIER &= ~OIER_E1;
- }
- spin_unlock_irq(&sa1100_rtc_lock);
-
- return 0;
-}
-
-static inline int sa1100_timer1_retrigger(struct rtc_device *rtc)
-{
- unsigned long diff;
- unsigned long period = timer_freq / rtc->irq_freq;
-
- spin_lock_irq(&sa1100_rtc_lock);
-
- do {
- OSMR1 += period;
- diff = OSMR1 - OSCR;
- /* If OSCR > OSMR1, diff is a very large number (unsigned
- * math). This means we have a lost interrupt. */
- } while (diff > period);
- OIER |= OIER_E1;
-
- spin_unlock_irq(&sa1100_rtc_lock);
-
- return 0;
-}
-
-static irqreturn_t timer1_interrupt(int irq, void *dev_id)
-{
- struct platform_device *pdev = to_platform_device(dev_id);
- struct rtc_device *rtc = platform_get_drvdata(pdev);
-
- /*
- * If we match for the first time, rtc_timer1_count will be 1.
- * Otherwise, we wrapped around (very unlikely but
- * still possible) so compute the amount of missed periods.
- * The match reg is updated only when the data is actually retrieved
- * to avoid unnecessary interrupts.
- */
- OSSR = OSSR_M1; /* clear match on timer1 */
-
- rtc_update_irq(rtc, rtc_timer1_count, RTC_PF | RTC_IRQF);
-
- if (rtc_timer1_count == 1)
- rtc_timer1_count =
- (rtc->irq_freq * ((1 << 30) / (timer_freq >> 2)));
-
- /* retrigger. */
- sa1100_timer1_retrigger(rtc);
-
- return IRQ_HANDLED;
-}
-
-static int sa1100_rtc_read_callback(struct device *dev, int data)
-{
- if (data & RTC_PF) {
- struct rtc_device *rtc = (struct rtc_device *)dev;
-
- /* interpolate missed periods and set match for the next */
- unsigned long period = timer_freq / rtc->irq_freq;
- unsigned long oscr = OSCR;
- unsigned long osmr1 = OSMR1;
- unsigned long missed = (oscr - osmr1)/period;
- data += missed << 8;
- OSSR = OSSR_M1; /* clear match on timer 1 */
- OSMR1 = osmr1 + (missed + 1)*period;
- /* Ensure we didn't miss another match in the mean time.
- * Here we compare (match - OSCR) 8 instead of 0 --
- * see comment in pxa_timer_interrupt() for explanation.
- */
- while ((signed long)((osmr1 = OSMR1) - OSCR) <= 8) {
- data += 0x100;
- OSSR = OSSR_M1; /* clear match on timer 1 */
- OSMR1 = osmr1 + period;
- }
- }
- return data;
-}
-
static int sa1100_rtc_open(struct device *dev)
{
int ret;
- struct rtc_device *rtc = (struct rtc_device *)dev;
+ struct platform_device *plat_dev = to_platform_device(dev);
+ struct rtc_device *rtc = platform_get_drvdata(plat_dev);
ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED,
"rtc 1Hz", dev);
@@ -277,19 +173,11 @@ static int sa1100_rtc_open(struct device *dev)
dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm);
goto fail_ai;
}
- ret = request_irq(IRQ_OST1, timer1_interrupt, IRQF_DISABLED,
- "rtc timer", dev);
- if (ret) {
- dev_err(dev, "IRQ %d already in use.\n", IRQ_OST1);
- goto fail_pi;
- }
rtc->max_user_freq = RTC_FREQ;
- sa1100_irq_set_freq(dev, RTC_FREQ);
+ rtc_irq_set_freq(rtc, NULL, RTC_FREQ);
return 0;
- fail_pi:
- free_irq(IRQ_RTCAlrm, dev);
fail_ai:
free_irq(IRQ_RTC1Hz, dev);
fail_ui:
@@ -304,30 +192,10 @@ static void sa1100_rtc_release(struct device *dev)
OSSR = OSSR_M1;
spin_unlock_irq(&sa1100_rtc_lock);
- free_irq(IRQ_OST1, dev);
free_irq(IRQ_RTCAlrm, dev);
free_irq(IRQ_RTC1Hz, dev);
}
-
-static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
-{
- switch (cmd) {
- case RTC_UIE_OFF:
- spin_lock_irq(&sa1100_rtc_lock);
- RTSR &= ~RTSR_HZE;
- spin_unlock_irq(&sa1100_rtc_lock);
- return 0;
- case RTC_UIE_ON:
- spin_lock_irq(&sa1100_rtc_lock);
- RTSR |= RTSR_HZE;
- spin_unlock_irq(&sa1100_rtc_lock);
- return 0;
- }
- return -ENOIOCTLCMD;
-}
-
static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
spin_lock_irq(&sa1100_rtc_lock);
@@ -386,31 +254,20 @@ static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
{
- struct rtc_device *rtc = (struct rtc_device *)dev;
-
- seq_printf(seq, "trim/divider\t: 0x%08x\n", (u32) RTTR);
- seq_printf(seq, "update_IRQ\t: %s\n",
- (RTSR & RTSR_HZE) ? "yes" : "no");
- seq_printf(seq, "periodic_IRQ\t: %s\n",
- (OIER & OIER_E1) ? "yes" : "no");
- seq_printf(seq, "periodic_freq\t: %d\n", rtc->irq_freq);
- seq_printf(seq, "RTSR\t\t: 0x%08x\n", (u32)RTSR);
+ seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR);
+ seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR);
return 0;
}
static const struct rtc_class_ops sa1100_rtc_ops = {
.open = sa1100_rtc_open,
- .read_callback = sa1100_rtc_read_callback,
.release = sa1100_rtc_release,
- .ioctl = sa1100_rtc_ioctl,
.read_time = sa1100_rtc_read_time,
.set_time = sa1100_rtc_set_time,
.read_alarm = sa1100_rtc_read_alarm,
.set_alarm = sa1100_rtc_set_alarm,
.proc = sa1100_rtc_proc,
- .irq_set_freq = sa1100_irq_set_freq,
- .irq_set_state = sa1100_irq_set_state,
.alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
};
@@ -418,8 +275,6 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- timer_freq = get_clock_tick_rate();
-
/*
* According to the manual we should be able to let RTTR be zero
	 * and then a default divisor for a 32.768KHz clock is used.
@@ -445,11 +300,6 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- /* Set the irq_freq */
- /*TODO: Find out who is messing with this value after we initialize
- * it here.*/
- rtc->irq_freq = RTC_FREQ;
-
	/* Fix for a nasty initialization problem in the SA11xx RTSR register.
* See also the comments in sa1100_rtc_interrupt().
*
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 93314a9..e55dc1a 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -344,27 +344,6 @@ static inline void sh_rtc_setcie(struct device *dev, unsigned int enable)
spin_unlock_irq(&rtc->lock);
}
-static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
-{
- struct sh_rtc *rtc = dev_get_drvdata(dev);
- unsigned int ret = 0;
-
- switch (cmd) {
- case RTC_UIE_OFF:
- rtc->periodic_freq &= ~PF_OXS;
- sh_rtc_setcie(dev, 0);
- break;
- case RTC_UIE_ON:
- rtc->periodic_freq |= PF_OXS;
- sh_rtc_setcie(dev, 1);
- break;
- default:
- ret = -ENOIOCTLCMD;
- }
-
- return ret;
-}
-
static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
sh_rtc_setaie(dev, enabled);
@@ -598,13 +577,10 @@ static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
}
static struct rtc_class_ops sh_rtc_ops = {
- .ioctl = sh_rtc_ioctl,
.read_time = sh_rtc_read_time,
.set_time = sh_rtc_set_time,
.read_alarm = sh_rtc_read_alarm,
.set_alarm = sh_rtc_set_alarm,
- .irq_set_state = sh_rtc_irq_set_state,
- .irq_set_freq = sh_rtc_irq_set_freq,
.proc = sh_rtc_proc,
.alarm_irq_enable = sh_rtc_alarm_irq_enable,
};
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 7e7d0c8..572e953 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -115,19 +115,6 @@ static int stmp3xxx_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int stmp3xxx_update_irq_enable(struct device *dev, unsigned int enabled)
-{
- struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
-
- if (enabled)
- stmp3xxx_setl(BM_RTC_CTRL_ONEMSEC_IRQ_EN,
- rtc_data->io + HW_RTC_CTRL);
- else
- stmp3xxx_clearl(BM_RTC_CTRL_ONEMSEC_IRQ_EN,
- rtc_data->io + HW_RTC_CTRL);
- return 0;
-}
-
static int stmp3xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
@@ -149,8 +136,6 @@ static int stmp3xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
static struct rtc_class_ops stmp3xxx_rtc_ops = {
.alarm_irq_enable =
stmp3xxx_alarm_irq_enable,
- .update_irq_enable =
- stmp3xxx_update_irq_enable,
.read_time = stmp3xxx_rtc_gettime,
.set_mmss = stmp3xxx_rtc_set_mmss,
.read_alarm = stmp3xxx_rtc_read_alarm,
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index a82d6fe..7e96254b 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -78,11 +78,16 @@ static ssize_t test_irq_store(struct device *dev,
struct rtc_device *rtc = platform_get_drvdata(plat_dev);
retval = count;
- if (strncmp(buf, "tick", 4) == 0)
+ if (strncmp(buf, "tick", 4) == 0 && rtc->pie_enabled)
rtc_update_irq(rtc, 1, RTC_PF | RTC_IRQF);
- else if (strncmp(buf, "alarm", 5) == 0)
- rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF);
- else if (strncmp(buf, "update", 6) == 0)
+ else if (strncmp(buf, "alarm", 5) == 0) {
+ struct rtc_wkalrm alrm;
+ int err = rtc_read_alarm(rtc, &alrm);
+
+ if (!err && alrm.enabled)
+ rtc_update_irq(rtc, 1, RTC_AF | RTC_IRQF);
+
+ } else if (strncmp(buf, "update", 6) == 0 && rtc->uie_rtctimer.enabled)
rtc_update_irq(rtc, 1, RTC_UF | RTC_IRQF);
else
retval = -EINVAL;
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index ed1b868..f9a2799 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -213,18 +213,6 @@ static int twl_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
return ret;
}
-static int twl_rtc_update_irq_enable(struct device *dev, unsigned enabled)
-{
- int ret;
-
- if (enabled)
- ret = set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
- else
- ret = mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
-
- return ret;
-}
-
/*
* Gets current TWL RTC time and date parameters.
*
@@ -433,7 +421,6 @@ static struct rtc_class_ops twl_rtc_ops = {
.read_alarm = twl_rtc_read_alarm,
.set_alarm = twl_rtc_set_alarm,
.alarm_irq_enable = twl_rtc_alarm_irq_enable,
- .update_irq_enable = twl_rtc_update_irq_enable,
};
/*----------------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 769190a..c5698cd 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -207,36 +207,6 @@ static int vr41xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
return 0;
}
-static int vr41xx_rtc_irq_set_freq(struct device *dev, int freq)
-{
- u64 count;
-
- if (!is_power_of_2(freq))
- return -EINVAL;
- count = RTC_FREQUENCY;
- do_div(count, freq);
-
- spin_lock_irq(&rtc_lock);
-
- periodic_count = count;
- rtc1_write(RTCL1LREG, periodic_count);
- rtc1_write(RTCL1HREG, periodic_count >> 16);
-
- spin_unlock_irq(&rtc_lock);
-
- return 0;
-}
-
-static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
-{
- if (enabled)
- enable_irq(pie_irq);
- else
- disable_irq(pie_irq);
-
- return 0;
-}
-
static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
@@ -308,8 +278,6 @@ static const struct rtc_class_ops vr41xx_rtc_ops = {
.set_time = vr41xx_rtc_set_time,
.read_alarm = vr41xx_rtc_read_alarm,
.set_alarm = vr41xx_rtc_set_alarm,
- .irq_set_freq = vr41xx_rtc_irq_set_freq,
- .irq_set_state = vr41xx_rtc_irq_set_state,
};
static int __devinit rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 82931dc..bdc909b 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -315,21 +315,6 @@ static int wm831x_rtc_alarm_irq_enable(struct device *dev,
return wm831x_rtc_stop_alarm(wm831x_rtc);
}
-static int wm831x_rtc_update_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- struct wm831x_rtc *wm831x_rtc = dev_get_drvdata(dev);
- int val;
-
- if (enabled)
- val = 1 << WM831X_RTC_PINT_FREQ_SHIFT;
- else
- val = 0;
-
- return wm831x_set_bits(wm831x_rtc->wm831x, WM831X_RTC_CONTROL,
- WM831X_RTC_PINT_FREQ_MASK, val);
-}
-
static irqreturn_t wm831x_alm_irq(int irq, void *data)
{
struct wm831x_rtc *wm831x_rtc = data;
@@ -354,7 +339,6 @@ static const struct rtc_class_ops wm831x_rtc_ops = {
.read_alarm = wm831x_rtc_readalarm,
.set_alarm = wm831x_rtc_setalarm,
.alarm_irq_enable = wm831x_rtc_alarm_irq_enable,
- .update_irq_enable = wm831x_rtc_update_irq_enable,
};
#ifdef CONFIG_PM
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 3d0dc76..6642142 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -302,26 +302,6 @@ static int wm8350_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
-static int wm8350_rtc_update_irq_enable(struct device *dev,
- unsigned int enabled)
-{
- struct wm8350 *wm8350 = dev_get_drvdata(dev);
-
- /* Suppress duplicate changes since genirq nests enable and
- * disable calls. */
- if (enabled == wm8350->rtc.update_enabled)
- return 0;
-
- if (enabled)
- wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_SEC);
- else
- wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
-
- wm8350->rtc.update_enabled = enabled;
-
- return 0;
-}
-
static irqreturn_t wm8350_rtc_alarm_handler(int irq, void *data)
{
struct wm8350 *wm8350 = data;
@@ -357,7 +337,6 @@ static const struct rtc_class_ops wm8350_rtc_ops = {
.read_alarm = wm8350_rtc_readalarm,
.set_alarm = wm8350_rtc_setalarm,
.alarm_irq_enable = wm8350_rtc_alarm_irq_enable,
- .update_irq_enable = wm8350_rtc_update_irq_enable,
};
#ifdef CONFIG_PM
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 318672d..a9fe23d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
static struct ccw_device_id dasd_eckd_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
- { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
+ { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c881a14..1f6a4d8 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -62,8 +62,8 @@ static int xpram_devs;
/*
* Parameter parsing functions.
*/
-static int __initdata devs = XPRAM_DEVS;
-static char __initdata *sizes[XPRAM_MAX_DEVS];
+static int devs = XPRAM_DEVS;
+static char *sizes[XPRAM_MAX_DEVS];
module_param(devs, int, 0);
module_param_array(sizes, charp, NULL, 0);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 8cd58e4..5ad44da 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp;
- int ct, perm;
+ unsigned int ct;
+ int perm;
argp = (void __user *)arg;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 7a242f0..267b54e 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request)
return rc;
}
+static inline void
+tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
+{
+ request->callback = (void *) tape_free_request;
+ request->callback_data = NULL;
+ tape_do_io_async(device, request);
+}
+
extern int tape_oper_handler(int irq, int status);
extern void tape_noper_handler(int irq, int status);
extern int tape_open(struct tape_device *);
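
tape_do_io_async_free() is a fire-and-forget helper: the completion callback is pointed at the free routine, so the submitter never waits on the request. A standalone sketch of the same shape, with illustrative names rather than the s390 tape API; completion is simulated by a direct call:

#include <stdio.h>
#include <stdlib.h>

struct request {
	void (*callback)(struct request *);
	int op;
};

static void free_request(struct request *rq)
{
	printf("op %d completed, request freed\n", rq->op);
	free(rq);
}

/* Stand-in for tape_do_io_async(): start the I/O and return immediately;
 * completion (here, an immediate direct call) runs the callback. */
static void do_io_async(struct request *rq)
{
	rq->callback(rq);
}

/* Same shape as tape_do_io_async_free(): make the request free itself on
 * completion, so the caller can forget about it. */
static void do_io_async_free(struct request *rq)
{
	rq->callback = free_request;
	do_io_async(rq);
}

int main(void)
{
	struct request *rq = malloc(sizeof(*rq));

	if (!rq)
		return 1;
	rq->op = 42;
	do_io_async_free(rq);	/* no wait for completion */
	return 0;
}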
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index c17f35b..c265111 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int);
* Medium sense for 34xx tapes. There is no 'real' medium sense call.
* So we just do a normal sense.
*/
-static int
-tape_34xx_medium_sense(struct tape_device *device)
+static void __tape_34xx_medium_sense(struct tape_request *request)
{
- struct tape_request *request;
- unsigned char *sense;
- int rc;
-
- request = tape_alloc_request(1, 32);
- if (IS_ERR(request)) {
- DBF_EXCEPTION(6, "MSEN fail\n");
- return PTR_ERR(request);
- }
-
- request->op = TO_MSEN;
- tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+ struct tape_device *device = request->device;
+ unsigned char *sense;
- rc = tape_do_io_interruptible(device, request);
if (request->rc == 0) {
sense = request->cpdata;
@@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device)
device->tape_generic_status |= GMT_WR_PROT(~0);
else
device->tape_generic_status &= ~GMT_WR_PROT(~0);
- } else {
+ } else
DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
request->rc);
- }
tape_free_request(request);
+}
+
+static int tape_34xx_medium_sense(struct tape_device *device)
+{
+ struct tape_request *request;
+ int rc;
+
+ request = tape_alloc_request(1, 32);
+ if (IS_ERR(request)) {
+ DBF_EXCEPTION(6, "MSEN fail\n");
+ return PTR_ERR(request);
+ }
+ request->op = TO_MSEN;
+ tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+ rc = tape_do_io_interruptible(device, request);
+ __tape_34xx_medium_sense(request);
return rc;
}
+static void tape_34xx_medium_sense_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = tape_alloc_request(1, 32);
+ if (IS_ERR(request)) {
+ DBF_EXCEPTION(6, "MSEN fail\n");
+ return;
+ }
+
+ request->op = TO_MSEN;
+ tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
+ request->callback = (void *) __tape_34xx_medium_sense;
+ request->callback_data = NULL;
+ tape_do_io_async(device, request);
+}
+
struct tape_34xx_work {
struct tape_device *device;
enum tape_op op;
@@ -109,6 +129,9 @@ struct tape_34xx_work {
* is inserted but cannot call tape_do_io* from an interrupt context.
* Maybe that's useful for other actions we want to start from the
* interrupt handler.
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
*/
static void
tape_34xx_work_handler(struct work_struct *work)
@@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work)
switch(p->op) {
case TO_MSEN:
- tape_34xx_medium_sense(device);
+ tape_34xx_medium_sense_async(device);
break;
default:
DBF_EVENT(3, "T34XX: internal error: unknown work\n");
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index fbe361f..de2e99e 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -329,17 +329,17 @@ out:
/*
* Enable encryption
*/
-static int tape_3592_enable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
{
struct tape_request *request;
char *data;
DBF_EVENT(6, "tape_3592_enable_crypt\n");
if (!crypt_supported(device))
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
request = tape_alloc_request(2, 72);
if (IS_ERR(request))
- return PTR_ERR(request);
+ return request;
data = request->cpdata;
memset(data,0,72);
@@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device)
request->op = TO_CRYPT_ON;
tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+ return request;
+}
+
+static int tape_3592_enable_crypt(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_enable_crypt(device);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
return tape_do_io_free(device, request);
}
+static void tape_3592_enable_crypt_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_enable_crypt(device);
+ if (!IS_ERR(request))
+ tape_do_io_async_free(device, request);
+}
+
/*
* Disable encryption
*/
-static int tape_3592_disable_crypt(struct tape_device *device)
+static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
{
struct tape_request *request;
char *data;
DBF_EVENT(6, "tape_3592_disable_crypt\n");
if (!crypt_supported(device))
- return -ENOSYS;
+ return ERR_PTR(-ENOSYS);
request = tape_alloc_request(2, 72);
if (IS_ERR(request))
- return PTR_ERR(request);
+ return request;
data = request->cpdata;
memset(data,0,72);
@@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device)
tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
+ return request;
+}
+
+static int tape_3592_disable_crypt(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_disable_crypt(device);
+ if (IS_ERR(request))
+ return PTR_ERR(request);
return tape_do_io_free(device, request);
}
+static void tape_3592_disable_crypt_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = __tape_3592_disable_crypt(device);
+ if (!IS_ERR(request))
+ tape_do_io_async_free(device, request);
+}
+
/*
* IOCTL: Set encryption status
*/
@@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
/*
* SENSE Medium: Get Sense data about medium state
*/
-static int
-tape_3590_sense_medium(struct tape_device *device)
+static int tape_3590_sense_medium(struct tape_device *device)
{
struct tape_request *request;
@@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device)
return tape_do_io_free(device, request);
}
+static void tape_3590_sense_medium_async(struct tape_device *device)
+{
+ struct tape_request *request;
+
+ request = tape_alloc_request(1, 128);
+ if (IS_ERR(request))
+ return;
+ request->op = TO_MSEN;
+ tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
+ tape_do_io_async_free(device, request);
+}
+
/*
* MTTELL: Tell block. Return the number of block relative to current file.
*/
@@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device,
* 2. The attention msg is written to the "read subsystem data" buffer.
* In this case we probably should print it to the console.
*/
-static int
-tape_3590_read_attmsg(struct tape_device *device)
+static void tape_3590_read_attmsg_async(struct tape_device *device)
{
struct tape_request *request;
char *buf;
request = tape_alloc_request(3, 4096);
if (IS_ERR(request))
- return PTR_ERR(request);
+ return;
request->op = TO_READ_ATTMSG;
buf = request->cpdata;
buf[0] = PREP_RD_SS_DATA;
@@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device)
tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
- return tape_do_io_free(device, request);
+ tape_do_io_async_free(device, request);
}
/*
* These functions are used to schedule follow-up actions from within an
* interrupt context (like unsolicited interrupts).
+ * Note: the work handler is called by the system work queue. The tape
+ * commands started by the handler need to be asynchronous, otherwise
+ * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
*/
struct work_handler_data {
struct tape_device *device;
@@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work)
switch (p->op) {
case TO_MSEN:
- tape_3590_sense_medium(p->device);
+ tape_3590_sense_medium_async(p->device);
break;
case TO_READ_ATTMSG:
- tape_3590_read_attmsg(p->device);
+ tape_3590_read_attmsg_async(p->device);
break;
case TO_CRYPT_ON:
- tape_3592_enable_crypt(p->device);
+ tape_3592_enable_crypt_async(p->device);
break;
case TO_CRYPT_OFF:
- tape_3592_disable_crypt(p->device);
+ tape_3592_disable_crypt_async(p->device);
break;
default:
DBF_EVENT(3, "T3590: work handler undefined for "
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2e9a87e..ef6de66 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -165,7 +165,7 @@ scsi_mod-$(CONFIG_SCSI_NETLINK) += scsi_netlink.o
scsi_mod-$(CONFIG_SYSCTL) += scsi_sysctl.o
scsi_mod-$(CONFIG_SCSI_PROC_FS) += scsi_proc.o
scsi_mod-y += scsi_trace.o
-scsi_mod-$(CONFIG_PM_OPS) += scsi_pm.o
+scsi_mod-$(CONFIG_PM) += scsi_pm.o
scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 79cefbe..638c72b 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4277,7 +4277,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
phba->shost->host_no);
- phba->wq = create_workqueue(phba->wq_name);
+ phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
if (!phba->wq) {
shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
"Failed to allocate work queue\n");
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 9c5c8be..d841e98 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6219,11 +6219,10 @@ static struct ata_port_operations ipr_sata_ops = {
};
static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
- .pio_mask = 0x10, /* pio4 */
- .mwdma_mask = 0x07,
- .udma_mask = 0x7f, /* udma0-6 */
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
+ .pio_mask = ATA_PIO4_ONLY,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
.port_ops = &ipr_sata_ops
};
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index e1a395b4..4d3b704 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -238,37 +238,43 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
return true;
}
-static void sas_ata_phy_reset(struct ata_port *ap)
+static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
+ struct ata_port *ap = link->ap;
struct domain_device *dev = ap->private_data;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
int res = TMF_RESP_FUNC_FAILED;
+ int ret = 0;
if (i->dft->lldd_I_T_nexus_reset)
res = i->dft->lldd_I_T_nexus_reset(dev);
- if (res != TMF_RESP_FUNC_COMPLETE)
+ if (res != TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__);
+ ret = -EAGAIN;
+ }
switch (dev->sata_dev.command_set) {
case ATA_COMMAND_SET:
SAS_DPRINTK("%s: Found ATA device.\n", __func__);
- ap->link.device[0].class = ATA_DEV_ATA;
+ *class = ATA_DEV_ATA;
break;
case ATAPI_COMMAND_SET:
SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
- ap->link.device[0].class = ATA_DEV_ATAPI;
+ *class = ATA_DEV_ATAPI;
break;
default:
SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
__func__,
dev->sata_dev.command_set);
- ap->link.device[0].class = ATA_DEV_UNKNOWN;
+ *class = ATA_DEV_UNKNOWN;
break;
}
ap->cbl = ATA_CBL_SATA;
+ return ret;
}
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
@@ -349,7 +355,11 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
}
static struct ata_port_operations sas_sata_ops = {
- .phy_reset = sas_ata_phy_reset,
+ .prereset = ata_std_prereset,
+ .softreset = NULL,
+ .hardreset = sas_ata_hard_reset,
+ .postreset = ata_std_postreset,
+ .error_handler = ata_std_error_handler,
.post_internal_cmd = sas_ata_post_internal,
.qc_defer = ata_std_qc_defer,
.qc_prep = ata_noop_qc_prep,
@@ -362,10 +372,9 @@ static struct ata_port_operations sas_sata_ops = {
};
static struct ata_port_info sata_port_info = {
- .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
- ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
- .pio_mask = 0x1f, /* PIO0-4 */
- .mwdma_mask = 0x07, /* MWDMA0-2 */
+ .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sas_sata_ops
};
@@ -781,3 +790,68 @@ int sas_discover_sata(struct domain_device *dev)
return res;
}
+
+void sas_ata_strategy_handler(struct Scsi_Host *shost)
+{
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost) {
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+ struct ata_port *ap = ddev->sata_dev.ap;
+
+ if (!dev_is_sata(ddev))
+ continue;
+
+ ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
+ ata_scsi_port_error_handler(shost, ap);
+ }
+}
+
+int sas_ata_timed_out(struct scsi_cmnd *cmd, struct sas_task *task,
+ enum blk_eh_timer_return *rtn)
+{
+ struct domain_device *ddev = cmd_to_domain_dev(cmd);
+
+ if (!dev_is_sata(ddev) || task)
+ return 0;
+
+ /* we're a sata device with no task, so this must be a libata
+ * eh timeout. Ideally should hook into libata timeout
+ * handling, but there's no point, it just wants to activate
+ * the eh thread */
+ *rtn = BLK_EH_NOT_HANDLED;
+ return 1;
+}
+
+int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
+ struct list_head *done_q)
+{
+ int rtn = 0;
+ struct scsi_cmnd *cmd, *n;
+ struct ata_port *ap;
+
+ do {
+ LIST_HEAD(sata_q);
+
+ ap = NULL;
+
+ list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
+ struct domain_device *ddev = cmd_to_domain_dev(cmd);
+
+ if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
+ continue;
+ if (ap && ap != ddev->sata_dev.ap)
+ continue;
+ ap = ddev->sata_dev.ap;
+ rtn = 1;
+ list_move(&cmd->eh_entry, &sata_q);
+ }
+
+ if (!list_empty(&sata_q)) {
+ ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
+ ata_scsi_cmd_error_handler(shost, ap, &sata_q);
+ }
+ } while (ap);
+
+ return rtn;
+}
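The sas_ata.c changes above drop the legacy ->phy_reset hook in favour of libata's
new-style error-handling callbacks (prereset/hardreset/postreset plus ->error_handler). A
stripped-down sketch of an ata_port_operations wired up that way; my_hardreset is a
hypothetical transport reset, only its signature is the one libata expects:

#include <linux/libata.h>

/* signature matches libata's ata_reset_fn_t */
static int my_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline)
{
        /* issue the transport-specific reset here ... */

        *class = ATA_DEV_ATA;   /* tell EH what kind of device was found */
        return 0;               /* 0 on success, -errno to make EH retry/fail */
}

static struct ata_port_operations my_sata_ops = {
        .prereset       = ata_std_prereset,
        .softreset      = NULL,                 /* no softreset method */
        .hardreset      = my_hardreset,
        .postreset      = ata_std_postreset,
        .error_handler  = ata_std_error_handler,
        .qc_defer       = ata_std_qc_defer,
};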
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 9a7aaf5..67758ea 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -663,11 +663,16 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
- if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
- scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
+ if (!sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q))
+ if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
+ scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
out:
+ /* now link into libata eh --- if we have any ata devices */
+ sas_ata_strategy_handler(shost);
+
scsi_eh_flush_done_q(&ha->eh_done_q);
+
SAS_DPRINTK("--- Exit %s\n", __func__);
return;
}
@@ -676,6 +681,11 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
unsigned long flags;
+ enum blk_eh_timer_return rtn;
+
+ if (sas_ata_timed_out(cmd, task, &rtn))
+ return rtn;
+
if (!task) {
cmd->request->timeout /= 2;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b5..d3e58d7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
struct Scsi_Host *host = rport_to_shost(rport);
fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+ unsigned long flags;
if (!fcport)
return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
* Transport has effectively 'deleted' the rport, clear
* all local references.
*/
- spin_lock_irq(host->host_lock);
+ spin_lock_irqsave(host->host_lock, flags);
fcport->rport = fcport->drport = NULL;
*((fc_port_t **)rport->dd_data) = NULL;
- spin_unlock_irq(host->host_lock);
+ spin_unlock_irqrestore(host->host_lock, flags);
if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
return;
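This and the following qla2xxx hunks convert spin_lock_irq()/spin_unlock_irq() on host_lock
to the irqsave/irqrestore pair, so the code is safe in contexts where interrupts may already
be disabled: the unlock restores the saved interrupt state instead of unconditionally
re-enabling interrupts. A tiny sketch of the idiom (my_lock and my_clear_ptr are
illustrative names):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static void *my_ptr;

static void my_clear_ptr(void)
{
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);     /* remembers current IRQ state */
        my_ptr = NULL;
        spin_unlock_irqrestore(&my_lock, flags);/* restores it, whatever it was */
}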
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a..d9479c3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
+ unsigned long flags;
- spin_lock_irq(fcport->vha->host->host_lock);
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
- spin_unlock_irq(fcport->vha->host->host_lock);
+ spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
}
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
struct fc_rport_identifiers rport_ids;
struct fc_rport *rport;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
qla2x00_rport_del(fcport);
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"Unable to allocate fc remote port!\n");
return;
}
- spin_lock_irq(fcport->vha->host->host_lock);
+ spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
- spin_unlock_irq(fcport->vha->host->host_lock);
+ spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
rport->supported_classes = fcport->supported_classes;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23..e90f7c1 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -349,7 +349,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
"Can't create request queue\n");
goto fail;
}
- ha->wq = create_workqueue("qla2xxx_wq");
+ ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1);
vha->req = ha->req_q_map[req];
options |= BIT_1;
for (ques = 1; ques < ha->max_rsp_queues; ques++) {
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
}
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
- atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
{
struct fc_rport *rport;
scsi_qla_host_t *base_vha;
+ unsigned long flags;
if (!fcport->rport)
return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
rport = fcport->rport;
if (defer) {
base_vha = pci_get_drvdata(vha->hw->pdev);
- spin_lock_irq(vha->host->host_lock);
+ spin_lock_irqsave(vha->host->host_lock, flags);
fcport->drport = rport;
- spin_unlock_irq(vha->host->host_lock);
+ spin_unlock_irqrestore(vha->host->host_lock, flags);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
} else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
- set_current_state(TASK_INTERRUPTIBLE);
schedule();
__set_current_state(TASK_RUNNING);
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
qla2x00_do_dpc_all_vps(base_vha);
ha->dpc_active = 0;
+ set_current_state(TASK_INTERRUPTIBLE);
} /* End of while(1) */
+ __set_current_state(TASK_RUNNING);
DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
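The qla2x00_do_dpc() change above moves set_current_state(TASK_INTERRUPTIBLE) ahead of the
kthread_should_stop() check and re-arms it at the bottom of the loop, closing the window
where a wake-up or kthread_stop() arriving between the check and the state change would be
lost. A generic sketch of the pattern, with a hypothetical thread function:

#include <linux/kthread.h>
#include <linux/sched.h>

static int my_thread_fn(void *data)
{
        set_current_state(TASK_INTERRUPTIBLE);  /* arm before the first check */
        while (!kthread_should_stop()) {
                schedule();                     /* sleep until woken or stopped */
                __set_current_state(TASK_RUNNING);

                /* ... handle whatever the waker queued ... */

                set_current_state(TASK_INTERRUPTIBLE); /* re-arm before re-checking */
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}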
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b31093..a6b2d72 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
unsigned long long lba, unsigned int num, int write)
{
int ret;
- unsigned int block, rest = 0;
+ unsigned long long block, rest = 0;
int (*func)(struct scsi_cmnd *, unsigned char *, int);
func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52..fb2bb35 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
&sdev->request_queue->queue_flags);
if (flagset)
queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
- __blk_run_queue(sdev->request_queue);
+ __blk_run_queue(sdev->request_queue, false);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
spin_unlock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index b4056d1..342ee1a 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -146,7 +146,7 @@ static inline void scsi_netlink_exit(void) {}
#endif
/* scsi_pm.c */
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
extern const struct dev_pm_ops scsi_bus_pm_ops;
#endif
#ifdef CONFIG_PM_RUNTIME
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 490ce21..e44ff64 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -383,7 +383,7 @@ struct bus_type scsi_bus_type = {
.name = "scsi",
.match = scsi_bus_match,
.uevent = scsi_bus_uevent,
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
.pm = &scsi_bus_pm_ops,
#endif
};
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index c399be9..f672820 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -629,7 +629,7 @@ static int __init scsi_tgt_init(void)
if (!scsi_tgt_cmd_cache)
return -ENOMEM;
- scsi_tgtd = create_workqueue("scsi_tgtd");
+ scsi_tgtd = alloc_workqueue("scsi_tgtd", 0, 1);
if (!scsi_tgtd) {
err = -ENOMEM;
goto free_kmemcache;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01b..5c3ccfc 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
if (flagset)
queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
- __blk_run_queue(rport->rqst_q);
+ __blk_run_queue(rport->rqst_q, false);
if (flagset)
queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 9592883..a429b01 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1557,9 +1557,7 @@ static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->ssp = ssp;
master->dev.parent = &pdev->dev;
-#ifdef CONFIG_OF
master->dev.of_node = pdev->dev.of_node;
-#endif
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 351d8a375..378e504 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -7,10 +7,9 @@
#include <linux/of_device.h>
#include <linux/spi/pxa2xx_spi.h>
-struct awesome_struct {
+struct ce4100_info {
struct ssp_device ssp;
- struct platform_device spi_pdev;
- struct pxa2xx_spi_master spi_pdata;
+ struct platform_device *spi_pdev;
};
static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp)
}
EXPORT_SYMBOL_GPL(pxa_ssp_free);
-static void plat_dev_release(struct device *dev)
-{
- struct awesome_struct *as = container_of(dev,
- struct awesome_struct, spi_pdev.dev);
-
- of_device_node_put(&as->spi_pdev.dev);
-}
-
static int __devinit ce4100_spi_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
int ret;
resource_size_t phys_beg;
resource_size_t phys_len;
- struct awesome_struct *spi_info;
+ struct ce4100_info *spi_info;
struct platform_device *pdev;
- struct pxa2xx_spi_master *spi_pdata;
+ struct pxa2xx_spi_master spi_pdata;
struct ssp_device *ssp;
ret = pci_enable_device(dev);
@@ -84,33 +75,28 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
return ret;
}
+ pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
- if (!spi_info) {
+ if (!pdev || !spi_info) {
ret = -ENOMEM;
- goto err_kz;
+ goto err_nomem;
}
- ssp = &spi_info->ssp;
- pdev = &spi_info->spi_pdev;
- spi_pdata = &spi_info->spi_pdata;
+ memset(&spi_pdata, 0, sizeof(spi_pdata));
+ spi_pdata.num_chipselect = dev->devfn;
- pdev->name = "pxa2xx-spi";
- pdev->id = dev->devfn;
- pdev->dev.parent = &dev->dev;
- pdev->dev.platform_data = &spi_info->spi_pdata;
+ ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
+ if (ret)
+ goto err_nomem;
-#ifdef CONFIG_OF
+ pdev->dev.parent = &dev->dev;
pdev->dev.of_node = dev->dev.of_node;
-#endif
- pdev->dev.release = plat_dev_release;
-
- spi_pdata->num_chipselect = dev->devfn;
-
+ ssp = &spi_info->ssp;
ssp->phys_base = pci_resource_start(dev, 0);
ssp->mmio_base = ioremap(phys_beg, phys_len);
if (!ssp->mmio_base) {
dev_err(&pdev->dev, "failed to ioremap() registers\n");
ret = -EIO;
- goto err_remap;
+ goto err_nomem;
}
ssp->irq = dev->irq;
ssp->port_id = pdev->id;
@@ -122,7 +108,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
pci_set_drvdata(dev, spi_info);
- ret = platform_device_register(pdev);
+ ret = platform_device_add(pdev);
if (ret)
goto err_dev_add;
@@ -135,27 +121,21 @@ err_dev_add:
mutex_unlock(&ssp_lock);
iounmap(ssp->mmio_base);
-err_remap:
- kfree(spi_info);
-
-err_kz:
+err_nomem:
release_mem_region(phys_beg, phys_len);
-
+ platform_device_put(pdev);
+ kfree(spi_info);
return ret;
}
static void __devexit ce4100_spi_remove(struct pci_dev *dev)
{
- struct awesome_struct *spi_info;
- struct platform_device *pdev;
+ struct ce4100_info *spi_info;
struct ssp_device *ssp;
spi_info = pci_get_drvdata(dev);
-
ssp = &spi_info->ssp;
- pdev = &spi_info->spi_pdev;
-
- platform_device_unregister(pdev);
+ platform_device_unregister(spi_info->spi_pdev);
iounmap(ssp->mmio_base);
release_mem_region(pci_resource_start(dev, 0),
@@ -171,7 +151,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
}
static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
-
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
{ },
};
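The pxa2xx_spi_pci rework above stops embedding a struct platform_device inside a driver
structure and instead uses platform_device_alloc()/platform_device_add_data()/
platform_device_add(), letting the platform core own the device's lifetime. A condensed
sketch of that allocation pattern; names such as my_register_child and "my-child" are made
up for illustration:

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>

struct my_pdata {
        int num_chipselect;
};

static struct platform_device *my_register_child(struct device *parent, int id)
{
        struct platform_device *pdev;
        struct my_pdata pdata = { .num_chipselect = id };
        int ret;

        pdev = platform_device_alloc("my-child", id);
        if (!pdev)
                return ERR_PTR(-ENOMEM);

        /* the platform core copies pdata, so a stack variable is fine */
        ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
        if (ret)
                goto err;

        pdev->dev.parent = parent;
        ret = platform_device_add(pdev);
        if (ret)
                goto err;
        return pdev;

err:
        platform_device_put(pdev);      /* undoes the alloc if _add never succeeded */
        return ERR_PTR(ret);
}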
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 7adaef6..4d2c75d 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -351,14 +351,12 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_OF
static const struct of_device_id xilinx_spi_of_match[] = {
{ .compatible = "xlnx,xps-spi-2.00.a", },
{ .compatible = "xlnx,xps-spi-2.00.b", },
{}
};
MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
-#endif
struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word)
@@ -394,9 +392,7 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
master->bus_num = bus_num;
master->num_chipselect = num_cs;
-#ifdef CONFIG_OF
master->dev.of_node = dev->of_node;
-#endif
xspi->mem = *mem;
xspi->irq = irq;
@@ -539,9 +535,7 @@ static struct platform_driver xilinx_spi_driver = {
.driver = {
.name = XILINX_SPI_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_OF
.of_match_table = xilinx_spi_of_match,
-#endif
},
};
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 5cfd708..973bb19 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,8 +13,7 @@ target_core_mod-y := target_core_configfs.o \
target_core_transport.o \
target_core_cdb.o \
target_core_ua.o \
- target_core_rd.o \
- target_core_mib.o
+ target_core_rd.o
obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2764510..caf8dc18 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,7 +37,6 @@
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
-#include <linux/proc_fs.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item)
{
struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
struct se_subsystem_dev, se_dev_group);
- struct config_group *dev_cg;
-
- if (!(se_dev))
- return;
+ struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+ struct se_subsystem_api *t = hba->transport;
+ struct config_group *dev_cg = &se_dev->se_dev_group;
- dev_cg = &se_dev->se_dev_group;
kfree(dev_cg->default_groups);
+ /*
+ * This pointer will be set when the storage is enabled with:
+ * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+ */
+ if (se_dev->se_dev_ptr) {
+ printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+ "virtual_device() for se_dev_ptr: %p\n",
+ se_dev->se_dev_ptr);
+
+ se_free_virtual_device(se_dev->se_dev_ptr, hba);
+ } else {
+ /*
+ * Release struct se_subsystem_dev->se_dev_su_ptr..
+ */
+ printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+ "device() for se_dev_su_ptr: %p\n",
+ se_dev->se_dev_su_ptr);
+
+ t->free_device(se_dev->se_dev_su_ptr);
+ }
+
+ printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+ "_dev_t: %p\n", se_dev);
+ kfree(se_dev);
}
static ssize_t target_core_dev_show(struct config_item *item,
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
NULL,
};
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+ struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+ struct t10_alua_lu_gp, lu_gp_group);
+
+ core_alua_free_lu_gp(lu_gp);
+}
+
static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+ .release = target_core_alua_lu_gp_release,
.show_attribute = target_core_alua_lu_gp_attr_show,
.store_attribute = target_core_alua_lu_gp_attr_store,
};
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp(
printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
" Group: core/alua/lu_gps/%s, ID: %hu\n",
config_item_name(item), lu_gp->lu_gp_id);
-
+ /*
+ * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+ * -> target_core_alua_lu_gp_release()
+ */
config_item_put(item);
- core_alua_free_lu_gp(lu_gp);
}
static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
NULL,
};
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+ struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+ struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+ core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+ .release = target_core_alua_tg_pt_gp_release,
.show_attribute = target_core_alua_tg_pt_gp_attr_show,
.store_attribute = target_core_alua_tg_pt_gp_attr_store,
};
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp(
printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
" Group: alua/tg_pt_gps/%s, ID: %hu\n",
config_item_name(item), tg_pt_gp->tg_pt_gp_id);
-
+ /*
+ * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+ * -> target_core_alua_tg_pt_gp_release().
+ */
config_item_put(item);
- core_alua_free_tg_pt_gp(tg_pt_gp);
}
static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev(
struct se_subsystem_api *t;
struct config_item *df_item;
struct config_group *dev_cg, *tg_pt_gp_cg;
- int i, ret;
+ int i;
hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
- if (mutex_lock_interruptible(&hba->hba_access_mutex))
- goto out;
-
+ mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
spin_lock(&se_global->g_device_lock);
@@ -2791,7 +2832,10 @@ static void target_core_drop_subdev(
config_item_put(df_item);
}
kfree(tg_pt_gp_cg->default_groups);
- core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+ /*
+ * core_alua_free_tg_pt_gp() for ->default_tg_pt_gp is called
+ * directly from target_core_alua_tg_pt_gp_release().
+ */
T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
dev_cg = &se_dev->se_dev_group;
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev(
dev_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
-
- config_item_put(item);
/*
- * This pointer will set when the storage is enabled with:
- * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+ * The release of se_dev and the associated se_dev->se_dev_ptr is done
+ * from target_core_dev_item_ops->release() -> target_core_dev_release().
*/
- if (se_dev->se_dev_ptr) {
- printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
- "virtual_device() for se_dev_ptr: %p\n",
- se_dev->se_dev_ptr);
-
- ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
- if (ret < 0)
- goto hba_out;
- } else {
- /*
- * Release struct se_subsystem_dev->se_dev_su_ptr..
- */
- printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
- "device() for se_dev_su_ptr: %p\n",
- se_dev->se_dev_su_ptr);
-
- t->free_device(se_dev->se_dev_su_ptr);
- }
-
- printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
- "_dev_t: %p\n", se_dev);
-
-hba_out:
+ config_item_put(item);
mutex_unlock(&hba->hba_access_mutex);
-out:
- kfree(se_dev);
}
static struct configfs_group_operations target_core_hba_group_ops = {
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
+static void target_core_hba_release(struct config_item *item)
+{
+ struct se_hba *hba = container_of(to_config_group(item),
+ struct se_hba, hba_group);
+ core_delete_hba(hba);
+}
+
static struct configfs_attribute *target_core_hba_attrs[] = {
&target_core_hba_hba_info.attr,
&target_core_hba_hba_mode.attr,
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = {
};
static struct configfs_item_operations target_core_hba_item_ops = {
+ .release = target_core_hba_release,
.show_attribute = target_core_hba_attr_show,
.store_attribute = target_core_hba_attr_store,
};
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget(
struct config_group *group,
struct config_item *item)
{
- struct se_hba *hba = item_to_hba(item);
-
+ /*
+ * core_delete_hba() is called from target_core_hba_item_ops->release()
+ * -> target_core_hba_release()
+ */
config_item_put(item);
- core_delete_hba(hba);
}
static struct configfs_group_operations target_core_group_ops = {
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void)
struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
struct config_group *lu_gp_cg = NULL;
struct configfs_subsystem *subsys;
- struct proc_dir_entry *scsi_target_proc = NULL;
struct t10_alua_lu_gp *lu_gp;
int ret;
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void)
if (core_dev_setup_virtual_lun0() < 0)
goto out;
- scsi_target_proc = proc_mkdir("scsi_target", 0);
- if (!(scsi_target_proc)) {
- printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
- goto out;
- }
- ret = init_scsi_target_mib();
- if (ret < 0)
- goto out;
-
return 0;
out:
configfs_unregister_subsystem(subsys);
- if (scsi_target_proc)
- remove_proc_entry("scsi_target", 0);
core_dev_release_virtual_lun0();
rd_module_exit();
out_global:
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void)
config_item_put(item);
}
kfree(lu_gp_cg->default_groups);
- core_alua_free_lu_gp(se_global->default_lu_gp);
- se_global->default_lu_gp = NULL;
+ lu_gp_cg->default_groups = NULL;
alua_cg = &se_global->alua_group;
for (i = 0; alua_cg->default_groups[i]; i++) {
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void)
config_item_put(item);
}
kfree(alua_cg->default_groups);
+ alua_cg->default_groups = NULL;
hba_cg = &se_global->target_core_hbagroup;
for (i = 0; hba_cg->default_groups[i]; i++) {
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void)
config_item_put(item);
}
kfree(hba_cg->default_groups);
-
- for (i = 0; subsys->su_group.default_groups[i]; i++) {
- item = &subsys->su_group.default_groups[i]->cg_item;
- subsys->su_group.default_groups[i] = NULL;
- config_item_put(item);
- }
+ hba_cg->default_groups = NULL;
+ /*
+ * We expect subsys->su_group.default_groups to be released
+ * by configfs subsystem provider logic..
+ */
+ configfs_unregister_subsystem(subsys);
kfree(subsys->su_group.default_groups);
- configfs_unregister_subsystem(subsys);
+ core_alua_free_lu_gp(se_global->default_lu_gp);
+ se_global->default_lu_gp = NULL;
+
printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
" Infrastructure\n");
- remove_scsi_target_mib();
- remove_proc_entry("scsi_target", 0);
core_dev_release_virtual_lun0();
rd_module_exit();
release_se_global();
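The target_core_configfs.c changes move object teardown out of the ->drop_item() paths and
into configfs_item_operations->release(), which configfs invokes only after the last
config_item reference has been dropped, so teardown can no longer race with a reader still
holding a reference. A minimal sketch of that ownership pattern with hypothetical my_*
types:

#include <linux/configfs.h>
#include <linux/slab.h>

struct my_item {
        struct config_group group;
        /* ... driver state ... */
};

/* called only when the last reference is gone */
static void my_item_release(struct config_item *item)
{
        struct my_item *p = container_of(to_config_group(item),
                                         struct my_item, group);
        kfree(p);
}

static struct configfs_item_operations my_item_ops = {
        .release        = my_item_release,
};

/* drop_item only drops the group's reference; freeing happens in release */
static void my_drop_item(struct config_group *group, struct config_item *item)
{
        config_item_put(item);
}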
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 317ce58..5da051a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -373,11 +373,11 @@ int core_update_device_list_for_node(
/*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
- * struct se_lun_acl.
+ * struct se_lun_acl, but we remove deve->alua_port_list from
+ * port->sep_alua_list. This also means that active UAs and
+ * NodeACL context specific PR metadata for demo-mode
+ * MappedLUN *deve will be released below..
*/
- if (!(deve->se_lun_acl))
- return 0;
-
spin_lock_bh(&port->sep_alua_lock);
list_del(&deve->alua_port_list);
spin_unlock_bh(&port->sep_alua_lock);
@@ -395,12 +395,14 @@ int core_update_device_list_for_node(
printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
" already set for demo mode -> explict"
" LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
return -1;
}
if (deve->se_lun != lun) {
printk(KERN_ERR "struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
return -1;
}
deve->se_lun_acl = lun_acl;
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev)
}
}
spin_unlock(&hba->device_lock);
-
- while (atomic_read(&hba->dev_mib_access_count))
- cpu_relax();
}
int se_dev_check_online(struct se_device *dev)
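The core_update_device_list_for_node() fix above adds the missing spin_unlock_irq() before
each early error return. A small sketch of the usual way to keep such paths balanced, using
a single unlock label (my_obj and MY_STATE_READY are illustrative):

#include <linux/spinlock.h>
#include <linux/errno.h>

struct my_obj {
        spinlock_t lock;
        int state;
        unsigned int generation;
};

#define MY_STATE_READY  1

static int my_update(struct my_obj *obj)
{
        int ret = 0;

        spin_lock_irq(&obj->lock);
        if (obj->state != MY_STATE_READY) {
                ret = -EINVAL;
                goto out_unlock;        /* never return with the lock held */
        }
        obj->generation++;
out_unlock:
        spin_unlock_irq(&obj->lock);
        return ret;
}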
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 32b148d..b65d1c8 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+ struct se_lun_acl *lacl = container_of(to_config_group(item),
+ struct se_lun_acl, se_lun_group);
+ struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+ core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
&target_fabric_mappedlun_write_protect.attr,
NULL,
};
static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+ .release = target_fabric_mappedlun_release,
.show_attribute = target_fabric_mappedlun_attr_show,
.store_attribute = target_fabric_mappedlun_attr_store,
.allow_link = target_fabric_mappedlun_link,
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun(
struct config_group *group,
struct config_item *item)
{
- struct se_lun_acl *lacl = container_of(to_config_group(item),
- struct se_lun_acl, se_lun_group);
- struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
-
config_item_put(item);
- core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+ struct se_node_acl *se_nacl = container_of(to_config_group(item),
+ struct se_node_acl, acl_group);
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_nodeacl(se_nacl);
}
static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+ .release = target_fabric_nacl_base_release,
.show_attribute = target_fabric_nacl_base_attr_show,
.store_attribute = target_fabric_nacl_base_attr_store,
};
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl(
struct config_group *group,
struct config_item *item)
{
- struct se_portal_group *se_tpg = container_of(group,
- struct se_portal_group, tpg_acl_group);
- struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
struct config_item *df_item;
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl(
nacl_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
-
+ /*
+ * struct se_node_acl free is done in target_fabric_nacl_base_release()
+ */
config_item_put(item);
- tf->tf_ops.fabric_drop_nodeacl(se_nacl);
}
static struct configfs_group_operations target_fabric_nacl_group_ops = {
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
+static void target_fabric_np_base_release(struct config_item *item)
+{
+ struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+ struct se_tpg_np, tpg_np_group);
+ struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+ struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
static struct configfs_item_operations target_fabric_np_base_item_ops = {
+ .release = target_fabric_np_base_release,
.show_attribute = target_fabric_np_base_attr_show,
.store_attribute = target_fabric_np_base_attr_store,
};
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np(
if (!(se_tpg_np) || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
+ se_tpg_np->tpg_np_parent = se_tpg;
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
@@ -476,14 +502,10 @@ static void target_fabric_drop_np(
struct config_group *group,
struct config_item *item)
{
- struct se_portal_group *se_tpg = container_of(group,
- struct se_portal_group, tpg_np_group);
- struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
- struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
- struct se_tpg_np, tpg_np_group);
-
+ /*
+ * struct se_tpg_np is released via target_fabric_np_base_release()
+ */
config_item_put(item);
- tf->tf_ops.fabric_drop_np(se_tpg_np);
}
static struct configfs_group_operations target_fabric_np_group_ops = {
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
*/
CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
+static void target_fabric_tpg_release(struct config_item *item)
+{
+ struct se_portal_group *se_tpg = container_of(to_config_group(item),
+ struct se_portal_group, tpg_group);
+ struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+ .release = target_fabric_tpg_release,
.show_attribute = target_fabric_tpg_attr_show,
.store_attribute = target_fabric_tpg_attr_store,
};
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg(
struct config_group *group,
struct config_item *item)
{
- struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
- struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
struct config_group *tpg_cg = &se_tpg->tpg_group;
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg(
}
config_item_put(item);
- tf->tf_ops.fabric_drop_tpg(se_tpg);
}
+static void target_fabric_release_wwn(struct config_item *item)
+{
+ struct se_wwn *wwn = container_of(to_config_group(item),
+ struct se_wwn, wwn_group);
+ struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+ tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+ .release = target_fabric_release_wwn,
+};
+
static struct configfs_group_operations target_fabric_tpg_group_ops = {
.make_group = target_fabric_make_tpg,
.drop_item = target_fabric_drop_tpg,
};
-TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+ NULL);
/* End of tfc_tpg_cit */
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn(
struct config_group *group,
struct config_item *item)
{
- struct target_fabric_configfs *tf = container_of(group,
- struct target_fabric_configfs, tf_group);
- struct se_wwn *wwn = container_of(to_config_group(item),
- struct se_wwn, wwn_group);
-
config_item_put(item);
- tf->tf_ops.fabric_drop_wwn(wwn);
}
static struct configfs_group_operations target_fabric_wwn_group_ops = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c6e0d75..67f0c09 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice(
bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
- if (!(bd))
+ if (IS_ERR(bd))
goto failed;
/*
* Setup the local scope queue_limits from struct request_queue->limits
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p)
{
struct iblock_dev *ib_dev = p;
- blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
- bioset_free(ib_dev->ibd_bio_set);
+ if (ib_dev->ibd_bd != NULL)
+ blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+ if (ib_dev->ibd_bio_set != NULL)
+ bioset_free(ib_dev->ibd_bio_set);
kfree(ib_dev);
}
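The iblock fix above matters because blkdev_get_by_path() reports failure with an
ERR_PTR()-encoded pointer, not NULL, and because ->free_device() may run before the block
device or bio set was ever set up. A short sketch of both points, with hypothetical
my_attach/my_detach helpers:

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct block_device *my_bd;

static int my_attach(const char *path, void *holder)
{
        struct block_device *bd;

        bd = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                holder);
        if (IS_ERR(bd))                 /* returns ERR_PTR(), never NULL */
                return PTR_ERR(bd);
        my_bd = bd;
        return 0;
}

static void my_detach(void)
{
        if (my_bd != NULL)              /* may be called before a successful attach */
                blkdev_put(my_bd, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        my_bd = NULL;
}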
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
deleted file mode 100644
index d5a48aa..0000000
--- a/drivers/target/target_core_mib.c
+++ /dev/null
@@ -1,1078 +0,0 @@
-/*******************************************************************************
- * Filename: target_core_mib.c
- *
- * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <generated/utsrelease.h>
-#include <linux/utsname.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_configfs.h>
-
-#include "target_core_hba.h"
-#include "target_core_mib.h"
-
-/* SCSI mib table index */
-static struct scsi_index_table scsi_index_table;
-
-#ifndef INITIAL_JIFFIES
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-#endif
-
-/* SCSI Instance Table */
-#define SCSI_INST_SW_INDEX 1
-#define SCSI_TRANSPORT_INDEX 1
-
-#define NONE "None"
-#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
-
-static inline int list_is_first(const struct list_head *list,
- const struct list_head *head)
-{
- return list->prev == head;
-}
-
-static void *locate_hba_start(
- struct seq_file *seq,
- loff_t *pos)
-{
- spin_lock(&se_global->g_device_lock);
- return seq_list_start(&se_global->g_se_dev_list, *pos);
-}
-
-static void *locate_hba_next(
- struct seq_file *seq,
- void *v,
- loff_t *pos)
-{
- return seq_list_next(v, &se_global->g_se_dev_list, pos);
-}
-
-static void locate_hba_stop(struct seq_file *seq, void *v)
-{
- spin_unlock(&se_global->g_device_lock);
-}
-
-/****************************************************************************
- * SCSI MIB Tables
- ****************************************************************************/
-
-/*
- * SCSI Instance Table
- */
-static void *scsi_inst_seq_start(
- struct seq_file *seq,
- loff_t *pos)
-{
- spin_lock(&se_global->hba_lock);
- return seq_list_start(&se_global->g_hba_list, *pos);
-}
-
-static void *scsi_inst_seq_next(
- struct seq_file *seq,
- void *v,
- loff_t *pos)
-{
- return seq_list_next(v, &se_global->g_hba_list, pos);
-}
-
-static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
-{
- spin_unlock(&se_global->hba_lock);
-}
-
-static int scsi_inst_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
-
- if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
- seq_puts(seq, "inst sw_indx\n");
-
- seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
- seq_printf(seq, "plugin: %s version: %s\n",
- hba->transport->name, TARGET_CORE_VERSION);
-
- return 0;
-}
-
-static const struct seq_operations scsi_inst_seq_ops = {
- .start = scsi_inst_seq_start,
- .next = scsi_inst_seq_next,
- .stop = scsi_inst_seq_stop,
- .show = scsi_inst_seq_show
-};
-
-static int scsi_inst_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_inst_seq_ops);
-}
-
-static const struct file_operations scsi_inst_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_inst_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Device Table
- */
-static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-static int scsi_dev_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- char str[28];
- int k;
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst indx role ports\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
- dev->dev_index, "Target", dev->dev_port_count);
-
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-
- /* vendor */
- for (k = 0; k < 8; k++)
- str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
- DEV_T10_WWN(dev)->vendor[k] : 0x20;
- str[k] = 0x20;
-
- /* model */
- for (k = 0; k < 16; k++)
- str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
- DEV_T10_WWN(dev)->model[k] : 0x20;
- str[k + 9] = 0;
-
- seq_printf(seq, "dev_alias: %s\n", str);
-
- return 0;
-}
-
-static const struct seq_operations scsi_dev_seq_ops = {
- .start = scsi_dev_seq_start,
- .next = scsi_dev_seq_next,
- .stop = scsi_dev_seq_stop,
- .show = scsi_dev_seq_show
-};
-
-static int scsi_dev_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_dev_seq_ops);
-}
-
-static const struct file_operations scsi_dev_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_dev_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Port Table
- */
-static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_port_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-static int scsi_port_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- struct se_port *sep, *sep_tmp;
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst device indx role busy_count\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- /* FIXME: scsiPortBusyStatuses count */
- spin_lock(&dev->se_port_lock);
- list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
- seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
- dev->dev_index, sep->sep_index, "Device",
- dev->dev_index, 0);
- }
- spin_unlock(&dev->se_port_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_port_seq_ops = {
- .start = scsi_port_seq_start,
- .next = scsi_port_seq_next,
- .stop = scsi_port_seq_stop,
- .show = scsi_port_seq_show
-};
-
-static int scsi_port_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_port_seq_ops);
-}
-
-static const struct file_operations scsi_port_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_port_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Transport Table
- */
-static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-static int scsi_transport_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- struct se_port *se, *se_tmp;
- struct se_portal_group *tpg;
- struct t10_wwn *wwn;
- char buf[64];
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst device indx dev_name\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- wwn = DEV_T10_WWN(dev);
-
- spin_lock(&dev->se_port_lock);
- list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
- tpg = se->sep_tpg;
- sprintf(buf, "scsiTransport%s",
- TPG_TFO(tpg)->get_fabric_name());
-
- seq_printf(seq, "%u %s %u %s+%s\n",
- hba->hba_index, /* scsiTransportIndex */
- buf, /* scsiTransportType */
- (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
- TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
- 0,
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- (strlen(wwn->unit_serial)) ?
- /* scsiTransportDevName */
- wwn->unit_serial : wwn->vendor);
- }
- spin_unlock(&dev->se_port_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_transport_seq_ops = {
- .start = scsi_transport_seq_start,
- .next = scsi_transport_seq_next,
- .stop = scsi_transport_seq_stop,
- .show = scsi_transport_seq_show
-};
-
-static int scsi_transport_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_transport_seq_ops);
-}
-
-static const struct file_operations scsi_transport_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_transport_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Target Device Table
- */
-static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-
-#define LU_COUNT 1 /* for now */
-static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- int non_accessible_lus = 0;
- char status[16];
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst indx num_LUs status non_access_LUs"
- " resets\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- switch (dev->dev_status) {
- case TRANSPORT_DEVICE_ACTIVATED:
- strcpy(status, "activated");
- break;
- case TRANSPORT_DEVICE_DEACTIVATED:
- strcpy(status, "deactivated");
- non_accessible_lus = 1;
- break;
- case TRANSPORT_DEVICE_SHUTDOWN:
- strcpy(status, "shutdown");
- non_accessible_lus = 1;
- break;
- case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
- case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
- strcpy(status, "offline");
- non_accessible_lus = 1;
- break;
- default:
- sprintf(status, "unknown(%d)", dev->dev_status);
- non_accessible_lus = 1;
- }
-
- seq_printf(seq, "%u %u %u %s %u %u\n",
- hba->hba_index, dev->dev_index, LU_COUNT,
- status, non_accessible_lus, dev->num_resets);
-
- return 0;
-}
-
-static const struct seq_operations scsi_tgt_dev_seq_ops = {
- .start = scsi_tgt_dev_seq_start,
- .next = scsi_tgt_dev_seq_next,
- .stop = scsi_tgt_dev_seq_stop,
- .show = scsi_tgt_dev_seq_show
-};
-
-static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_tgt_dev_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_dev_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_tgt_dev_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Target Port Table
- */
-static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- struct se_port *sep, *sep_tmp;
- struct se_portal_group *tpg;
- u32 rx_mbytes, tx_mbytes;
- unsigned long long num_cmds;
- char buf[64];
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst device indx name port_index in_cmds"
- " write_mbytes read_mbytes hs_in_cmds\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- spin_lock(&dev->se_port_lock);
- list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
- tpg = sep->sep_tpg;
- sprintf(buf, "%sPort#",
- TPG_TFO(tpg)->get_fabric_name());
-
- seq_printf(seq, "%u %u %u %s%d %s%s%d ",
- hba->hba_index,
- dev->dev_index,
- sep->sep_index,
- buf, sep->sep_index,
- TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
- TPG_TFO(tpg)->tpg_get_tag(tpg));
-
- spin_lock(&sep->sep_lun->lun_sep_lock);
- num_cmds = sep->sep_stats.cmd_pdus;
- rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
- tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
- spin_unlock(&sep->sep_lun->lun_sep_lock);
-
- seq_printf(seq, "%llu %u %u %u\n", num_cmds,
- rx_mbytes, tx_mbytes, 0);
- }
- spin_unlock(&dev->se_port_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_tgt_port_seq_ops = {
- .start = scsi_tgt_port_seq_start,
- .next = scsi_tgt_port_seq_next,
- .stop = scsi_tgt_port_seq_stop,
- .show = scsi_tgt_port_seq_show
-};
-
-static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_tgt_port_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_port_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_tgt_port_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Authorized Initiator Table:
- * It contains the SCSI Initiators authorized to be attached to one of the
- * local Target ports.
- * Iterates through all active TPGs and extracts the info from the ACLs
- */
-static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
-{
- spin_lock_bh(&se_global->se_tpg_lock);
- return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
- loff_t *pos)
-{
- return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
-{
- spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
-{
- struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
- se_tpg_list);
- struct se_dev_entry *deve;
- struct se_lun *lun;
- struct se_node_acl *se_nacl;
- int j;
-
- if (list_is_first(&se_tpg->se_tpg_list,
- &se_global->g_se_tpg_list))
- seq_puts(seq, "inst dev port indx dev_or_port intr_name "
- "map_indx att_count num_cmds read_mbytes "
- "write_mbytes hs_num_cmds creation_time row_status\n");
-
- if (!(se_tpg))
- return 0;
-
- spin_lock(&se_tpg->acl_node_lock);
- list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
-
- atomic_inc(&se_nacl->mib_ref_count);
- smp_mb__after_atomic_inc();
- spin_unlock(&se_tpg->acl_node_lock);
-
- spin_lock_irq(&se_nacl->device_list_lock);
- for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
- deve = &se_nacl->device_list[j];
- if (!(deve->lun_flags &
- TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
- (!deve->se_lun))
- continue;
- lun = deve->se_lun;
- if (!lun->lun_se_dev)
- continue;
-
- seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
- " %u %s\n",
- /* scsiInstIndex */
- (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
- TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
- 0,
- /* scsiDeviceIndex */
- lun->lun_se_dev->dev_index,
- /* scsiAuthIntrTgtPortIndex */
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
- /* scsiAuthIntrIndex */
- se_nacl->acl_index,
- /* scsiAuthIntrDevOrPort */
- 1,
- /* scsiAuthIntrName */
- se_nacl->initiatorname[0] ?
- se_nacl->initiatorname : NONE,
- /* FIXME: scsiAuthIntrLunMapIndex */
- 0,
- /* scsiAuthIntrAttachedTimes */
- deve->attach_count,
- /* scsiAuthIntrOutCommands */
- deve->total_cmds,
- /* scsiAuthIntrReadMegaBytes */
- (u32)(deve->read_bytes >> 20),
- /* scsiAuthIntrWrittenMegaBytes */
- (u32)(deve->write_bytes >> 20),
- /* FIXME: scsiAuthIntrHSOutCommands */
- 0,
- /* scsiAuthIntrLastCreation */
- (u32)(((u32)deve->creation_time -
- INITIAL_JIFFIES) * 100 / HZ),
- /* FIXME: scsiAuthIntrRowStatus */
- "Ready");
- }
- spin_unlock_irq(&se_nacl->device_list_lock);
-
- spin_lock(&se_tpg->acl_node_lock);
- atomic_dec(&se_nacl->mib_ref_count);
- smp_mb__after_atomic_dec();
- }
- spin_unlock(&se_tpg->acl_node_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_auth_intr_seq_ops = {
- .start = scsi_auth_intr_seq_start,
- .next = scsi_auth_intr_seq_next,
- .stop = scsi_auth_intr_seq_stop,
- .show = scsi_auth_intr_seq_show
-};
-
-static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_auth_intr_seq_ops);
-}
-
-static const struct file_operations scsi_auth_intr_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_auth_intr_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Attached Initiator Port Table:
- * It lists the SCSI Initiators attached to one of the local Target ports.
- * Iterates through all active TPGs and uses active sessions from each TPG
- * to list the info for this table.
- */
-static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
- spin_lock_bh(&se_global->se_tpg_lock);
- return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
- loff_t *pos)
-{
- return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
-{
- spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
-{
- struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
- se_tpg_list);
- struct se_dev_entry *deve;
- struct se_lun *lun;
- struct se_node_acl *se_nacl;
- struct se_session *se_sess;
- unsigned char buf[64];
- int j;
-
- if (list_is_first(&se_tpg->se_tpg_list,
- &se_global->g_se_tpg_list))
- seq_puts(seq, "inst dev port indx port_auth_indx port_name"
- " port_ident\n");
-
- if (!(se_tpg))
- return 0;
-
- spin_lock(&se_tpg->session_lock);
- list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
- if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
- (!se_sess->se_node_acl) ||
- (!se_sess->se_node_acl->device_list))
- continue;
-
- atomic_inc(&se_sess->mib_ref_count);
- smp_mb__after_atomic_inc();
- se_nacl = se_sess->se_node_acl;
- atomic_inc(&se_nacl->mib_ref_count);
- smp_mb__after_atomic_inc();
- spin_unlock(&se_tpg->session_lock);
-
- spin_lock_irq(&se_nacl->device_list_lock);
- for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
- deve = &se_nacl->device_list[j];
- if (!(deve->lun_flags &
- TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
- (!deve->se_lun))
- continue;
-
- lun = deve->se_lun;
- if (!lun->lun_se_dev)
- continue;
-
- memset(buf, 0, 64);
- if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
- TPG_TFO(se_tpg)->sess_get_initiator_sid(
- se_sess, (unsigned char *)&buf[0], 64);
-
- seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
- /* scsiInstIndex */
- (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
- TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
- 0,
- /* scsiDeviceIndex */
- lun->lun_se_dev->dev_index,
- /* scsiPortIndex */
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
- /* scsiAttIntrPortIndex */
- (TPG_TFO(se_tpg)->sess_get_index != NULL) ?
- TPG_TFO(se_tpg)->sess_get_index(se_sess) :
- 0,
- /* scsiAttIntrPortAuthIntrIdx */
- se_nacl->acl_index,
- /* scsiAttIntrPortName */
- se_nacl->initiatorname[0] ?
- se_nacl->initiatorname : NONE,
- /* scsiAttIntrPortIdentifier */
- buf);
- }
- spin_unlock_irq(&se_nacl->device_list_lock);
-
- spin_lock(&se_tpg->session_lock);
- atomic_dec(&se_nacl->mib_ref_count);
- smp_mb__after_atomic_dec();
- atomic_dec(&se_sess->mib_ref_count);
- smp_mb__after_atomic_dec();
- }
- spin_unlock(&se_tpg->session_lock);
-
- return 0;
-}
-
-static const struct seq_operations scsi_att_intr_port_seq_ops = {
- .start = scsi_att_intr_port_seq_start,
- .next = scsi_att_intr_port_seq_next,
- .stop = scsi_att_intr_port_seq_stop,
- .show = scsi_att_intr_port_seq_show
-};
-
-static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_att_intr_port_seq_ops);
-}
-
-static const struct file_operations scsi_att_intr_port_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_att_intr_port_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * SCSI Logical Unit Table
- */
-static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
-{
- return locate_hba_start(seq, pos);
-}
-
-static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
-{
- locate_hba_stop(seq, v);
-}
-
-#define SCSI_LU_INDEX 1
-static int scsi_lu_seq_show(struct seq_file *seq, void *v)
-{
- struct se_hba *hba;
- struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
- g_se_dev_list);
- struct se_device *dev = se_dev->se_dev_ptr;
- int j;
- char str[28];
-
- if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
- seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
- " dev_type status state-bit num_cmds read_mbytes"
- " write_mbytes resets full_stat hs_num_cmds creation_time\n");
-
- if (!(dev))
- return 0;
-
- hba = dev->se_hba;
- if (!(hba)) {
- /* Log error ? */
- return 0;
- }
-
- /* Fix LU state, if we can read it from the device */
- seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
- dev->dev_index, SCSI_LU_INDEX,
- (unsigned long long)0, /* FIXME: scsiLuDefaultLun */
- (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
- /* scsiLuWwnName */
- (char *)&DEV_T10_WWN(dev)->unit_serial[0] :
- "None");
-
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- /* scsiLuVendorId */
- for (j = 0; j < 8; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
- DEV_T10_WWN(dev)->vendor[j] : 0x20;
- str[8] = 0;
- seq_printf(seq, " %s", str);
-
- /* scsiLuProductId */
- for (j = 0; j < 16; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
- DEV_T10_WWN(dev)->model[j] : 0x20;
- str[16] = 0;
- seq_printf(seq, " %s", str);
-
- /* scsiLuRevisionId */
- for (j = 0; j < 4; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
- DEV_T10_WWN(dev)->revision[j] : 0x20;
- str[4] = 0;
- seq_printf(seq, " %s", str);
-
- seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
- /* scsiLuPeripheralType */
- TRANSPORT(dev)->get_device_type(dev),
- (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
- "available" : "notavailable", /* scsiLuStatus */
- "exposed", /* scsiLuState */
- (unsigned long long)dev->num_cmds,
- /* scsiLuReadMegaBytes */
- (u32)(dev->read_bytes >> 20),
- /* scsiLuWrittenMegaBytes */
- (u32)(dev->write_bytes >> 20),
- dev->num_resets, /* scsiLuInResets */
- 0, /* scsiLuOutTaskSetFullStatus */
- 0, /* scsiLuHSInCommands */
- (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
- 100 / HZ));
-
- return 0;
-}
-
-static const struct seq_operations scsi_lu_seq_ops = {
- .start = scsi_lu_seq_start,
- .next = scsi_lu_seq_next,
- .stop = scsi_lu_seq_stop,
- .show = scsi_lu_seq_show
-};
-
-static int scsi_lu_seq_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &scsi_lu_seq_ops);
-}
-
-static const struct file_operations scsi_lu_seq_fops = {
- .owner = THIS_MODULE,
- .open = scsi_lu_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/****************************************************************************/
-
-/*
- * Remove proc fs entries
- */
-void remove_scsi_target_mib(void)
-{
- remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
- remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
- remove_proc_entry("scsi_target/mib/scsi_port", NULL);
- remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
- remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
- remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
- remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
- remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
- remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
- remove_proc_entry("scsi_target/mib", NULL);
-}
-
-/*
- * Create proc fs entries for the mib tables
- */
-int init_scsi_target_mib(void)
-{
- struct proc_dir_entry *dir_entry;
- struct proc_dir_entry *scsi_inst_entry;
- struct proc_dir_entry *scsi_dev_entry;
- struct proc_dir_entry *scsi_port_entry;
- struct proc_dir_entry *scsi_transport_entry;
- struct proc_dir_entry *scsi_tgt_dev_entry;
- struct proc_dir_entry *scsi_tgt_port_entry;
- struct proc_dir_entry *scsi_auth_intr_entry;
- struct proc_dir_entry *scsi_att_intr_port_entry;
- struct proc_dir_entry *scsi_lu_entry;
-
- dir_entry = proc_mkdir("scsi_target/mib", NULL);
- if (!(dir_entry)) {
- printk(KERN_ERR "proc_mkdir() failed.\n");
- return -1;
- }
-
- scsi_inst_entry =
- create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
- if (scsi_inst_entry)
- scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
- else
- goto error;
-
- scsi_dev_entry =
- create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
- if (scsi_dev_entry)
- scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
- else
- goto error;
-
- scsi_port_entry =
- create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
- if (scsi_port_entry)
- scsi_port_entry->proc_fops = &scsi_port_seq_fops;
- else
- goto error;
-
- scsi_transport_entry =
- create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
- if (scsi_transport_entry)
- scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
- else
- goto error;
-
- scsi_tgt_dev_entry =
- create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
- if (scsi_tgt_dev_entry)
- scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
- else
- goto error;
-
- scsi_tgt_port_entry =
- create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
- if (scsi_tgt_port_entry)
- scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
- else
- goto error;
-
- scsi_auth_intr_entry =
- create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
- if (scsi_auth_intr_entry)
- scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
- else
- goto error;
-
- scsi_att_intr_port_entry =
- create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
- if (scsi_att_intr_port_entry)
- scsi_att_intr_port_entry->proc_fops =
- &scsi_att_intr_port_seq_fops;
- else
- goto error;
-
- scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
- if (scsi_lu_entry)
- scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
- else
- goto error;
-
- return 0;
-
-error:
- printk(KERN_ERR "create_proc_entry() failed.\n");
- remove_scsi_target_mib();
- return -1;
-}
-
-/*
- * Initialize the index table for allocating unique row indexes to various mib
- * tables
- */
-void init_scsi_index_table(void)
-{
- memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
- spin_lock_init(&scsi_index_table.lock);
-}
-
-/*
- * Allocate a new row index for the entry type specified
- */
-u32 scsi_get_new_index(scsi_index_t type)
-{
- u32 new_index;
-
- if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
- printk(KERN_ERR "Invalid index type %d\n", type);
- return -1;
- }
-
- spin_lock(&scsi_index_table.lock);
- new_index = ++scsi_index_table.scsi_mib_index[type];
- if (new_index == 0)
- new_index = ++scsi_index_table.scsi_mib_index[type];
- spin_unlock(&scsi_index_table.lock);
-
- return new_index;
-}
-EXPORT_SYMBOL(scsi_get_new_index);
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
deleted file mode 100644
index 2772046..0000000
--- a/drivers/target/target_core_mib.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef TARGET_CORE_MIB_H
-#define TARGET_CORE_MIB_H
-
-typedef enum {
- SCSI_INST_INDEX,
- SCSI_DEVICE_INDEX,
- SCSI_AUTH_INTR_INDEX,
- SCSI_INDEX_TYPE_MAX
-} scsi_index_t;
-
-struct scsi_index_table {
- spinlock_t lock;
- u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
-} ____cacheline_aligned;
-
-/* SCSI Port stats */
-struct scsi_port_stats {
- u64 cmd_pdus;
- u64 tx_data_octets;
- u64 rx_data_octets;
-} ____cacheline_aligned;
-
-extern int init_scsi_target_mib(void);
-extern void remove_scsi_target_mib(void);
-extern void init_scsi_index_table(void);
-extern u32 scsi_get_new_index(scsi_index_t);
-
-#endif /*** TARGET_CORE_MIB_H ***/
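
The scsi_get_new_index() allocator removed above is not lost: the target_core_transport.c hunk further down re-adds it with the same skip-zero wrap-around on a per-type 32-bit counter. A minimal userspace sketch of that behaviour, with a pthread mutex standing in for the kernel spinlock (names here are illustrative, not the kernel's):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

enum { IDX_INST, IDX_DEVICE, IDX_AUTH_INTR, IDX_TYPE_MAX };

static struct {
	pthread_mutex_t lock;
	uint32_t counter[IDX_TYPE_MAX];
} idx_table = { .lock = PTHREAD_MUTEX_INITIALIZER };

static uint32_t get_new_index(int type)
{
	uint32_t new_index;

	if (type < 0 || type >= IDX_TYPE_MAX)
		return 0;			/* invalid type */

	pthread_mutex_lock(&idx_table.lock);
	new_index = ++idx_table.counter[type];
	if (new_index == 0)			/* skip 0 after a 32-bit wrap */
		new_index = ++idx_table.counter[type];
	pthread_mutex_unlock(&idx_table.lock);

	return new_index;
}

int main(void)
{
	uint32_t a = get_new_index(IDX_DEVICE);
	uint32_t b = get_new_index(IDX_DEVICE);

	printf("%u %u\n", a, b);	/* prints "1 2" */
	return 0;
}
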
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 742d246..f2a08477a 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk(
*/
bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
- if (!(bd)) {
- printk("pSCSI: blkdev_get_by_path() failed\n");
+ if (IS_ERR(bd)) {
+ printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
return NULL;
}
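
The pscsi hunk above matters because blkdev_get_by_path() reports failure through an ERR_PTR-encoded pointer rather than NULL, so the old NULL test could never fire. A simplified userspace model of that convention (the real macros live in the kernel's <linux/err.h>; open_backing_device() is a hypothetical stand-in):

#include <stdio.h>

/* Simplified userspace models of the kernel's ERR_PTR()/IS_ERR() helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *open_backing_device(int fail)
{
	static int device;		/* stand-in for a real handle */

	return fail ? ERR_PTR(-2 /* -ENOENT */) : (void *)&device;
}

int main(void)
{
	void *bd = open_backing_device(1);

	if (IS_ERR(bd)) {		/* a NULL check would miss this */
		printf("open failed: %ld\n", PTR_ERR(bd));
		return 1;
	}
	return 0;
}
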
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 158cecb..4a10983 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -282,6 +282,9 @@ int core_tmr_lun_reset(
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
+ } else {
+ if (atomic_read(&task->task_execute_queue) != 0)
+ transport_remove_task_from_execute_queue(task, dev);
}
__transport_stop_task_timer(task, &flags);
@@ -301,6 +304,7 @@ int core_tmr_lun_reset(
DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
+ atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
@@ -310,6 +314,7 @@ int core_tmr_lun_reset(
}
DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
+ atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index abfa81a..c26f674 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
- atomic_set(&acl->mib_ref_count, 0);
acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
cpu_relax();
}
-void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
-{
- while (atomic_read(&nacl->mib_ref_count) != 0)
- cpu_relax();
-}
-
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
int i, ret;
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl(
spin_unlock_bh(&tpg->session_lock);
core_tpg_wait_for_nacl_pr_ref(acl);
- core_tpg_wait_for_mib_ref(acl);
core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register);
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
+ struct se_node_acl *nacl, *nacl_tmp;
+
printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
" for endpoint: %s Portal Tag %u\n",
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
+ /*
+ * Release any remaining demo-mode generated se_node_acl that have
+ * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+ * in transport_deregister_session().
+ */
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+ acl_list) {
+ list_del(&nacl->acl_list);
+ se_tpg->num_node_acls--;
+ spin_unlock_bh(&se_tpg->acl_node_lock);
+
+ core_tpg_wait_for_nacl_pr_ref(nacl);
+ core_free_device_list_for_node(nacl, se_tpg);
+ TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+
+ spin_lock_bh(&se_tpg->acl_node_lock);
+ }
+ spin_unlock_bh(&se_tpg->acl_node_lock);
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
core_tpg_release_virtual_lun0(se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 28b6292ff..4bbf6c1 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -379,6 +379,40 @@ void release_se_global(void)
se_global = NULL;
}
+/* SCSI statistics table index */
+static struct scsi_index_table scsi_index_table;
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables.
+ */
+void init_scsi_index_table(void)
+{
+ memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+ spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+ u32 new_index;
+
+ if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+ printk(KERN_ERR "Invalid index type %d\n", type);
+ return -EINVAL;
+ }
+
+ spin_lock(&scsi_index_table.lock);
+ new_index = ++scsi_index_table.scsi_mib_index[type];
+ if (new_index == 0)
+ new_index = ++scsi_index_table.scsi_mib_index[type];
+ spin_unlock(&scsi_index_table.lock);
+
+ return new_index;
+}
+
void transport_init_queue_obj(struct se_queue_obj *qobj)
{
atomic_set(&qobj->queue_cnt, 0);
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void)
}
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
- atomic_set(&se_sess->mib_ref_count, 0);
return se_sess;
}
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess)
transport_free_session(se_sess);
return;
}
- /*
- * Wait for possible reference in drivers/target/target_core_mib.c:
- * scsi_att_intr_port_seq_show()
- */
- while (atomic_read(&se_sess->mib_ref_count) != 0)
- cpu_relax();
spin_lock_bh(&se_tpg->session_lock);
list_del(&se_sess->sess_list);
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess)
spin_unlock_bh(&se_tpg->acl_node_lock);
core_tpg_wait_for_nacl_pr_ref(se_nacl);
- core_tpg_wait_for_mib_ref(se_nacl);
core_free_device_list_for_node(se_nacl, se_tpg);
TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
se_nacl);
@@ -1181,7 +1207,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
*
*
*/
-static void transport_remove_task_from_execute_queue(
+void transport_remove_task_from_execute_queue(
struct se_task *task,
struct se_device *dev)
{
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map(
return ret;
}
+
+ BUG_ON(list_empty(se_mem_list));
/*
* This is the normal path for all normal non BIDI and BIDI-COMMAND
* WRITE payloads.. If we need to do BIDI READ passthrough for
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
u32 se_mem_cnt = 0, task_offset = 0;
- BUG_ON(list_empty(cmd->t_task->t_mem_list));
+ if (!list_empty(T_TASK(cmd)->t_mem_list))
+ se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
+ struct se_mem, se_list);
ret = transport_do_se_mem_map(dev, task,
cmd->t_task->t_mem_list, NULL, se_mem,
@@ -5519,7 +5549,8 @@ static void transport_generic_wait_for_tasks(
atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
}
- if (!atomic_read(&T_TASK(cmd)->t_transport_active))
+ if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
+ atomic_read(&T_TASK(cmd)->t_transport_aborted))
goto remove;
atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
@@ -5926,6 +5957,9 @@ static void transport_processing_shutdown(struct se_device *dev)
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
+ } else {
+ if (atomic_read(&task->task_execute_queue) != 0)
+ transport_remove_task_from_execute_queue(task, dev);
}
__transport_stop_task_timer(task, &flags);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7a5dba..bf7c687 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,7 +4,6 @@
menuconfig THERMAL
tristate "Generic Thermal sysfs driver"
- depends on NET
help
Generic Thermal Sysfs driver offers a generic mechanism for
thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 7d0e63c..713b7ea 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
static unsigned int thermal_event_seqnum;
-static struct genl_family thermal_event_genl_family = {
- .id = GENL_ID_GENERATE,
- .name = THERMAL_GENL_FAMILY_NAME,
- .version = THERMAL_GENL_VERSION,
- .maxattr = THERMAL_GENL_ATTR_MAX,
-};
-
-static struct genl_multicast_group thermal_event_mcgrp = {
- .name = THERMAL_GENL_MCAST_GROUP_NAME,
-};
-
-static int genetlink_init(void);
-static void genetlink_exit(void);
-
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
EXPORT_SYMBOL(thermal_zone_device_unregister);
+#ifdef CONFIG_NET
+static struct genl_family thermal_event_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = THERMAL_GENL_FAMILY_NAME,
+ .version = THERMAL_GENL_VERSION,
+ .maxattr = THERMAL_GENL_ATTR_MAX,
+};
+
+static struct genl_multicast_group thermal_event_mcgrp = {
+ .name = THERMAL_GENL_MCAST_GROUP_NAME,
+};
+
int generate_netlink_event(u32 orig, enum events event)
{
struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
return result;
}
+static void genetlink_exit(void)
+{
+ genl_unregister_family(&thermal_event_genl_family);
+}
+#else /* !CONFIG_NET */
+static inline int genetlink_init(void) { return 0; }
+static inline void genetlink_exit(void) {}
+#endif /* !CONFIG_NET */
+
static int __init thermal_init(void)
{
int result = 0;
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
return result;
}
-static void genetlink_exit(void)
-{
- genl_unregister_family(&thermal_event_genl_family);
-}
-
static void __exit thermal_exit(void)
{
class_unregister(&thermal_class);
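
Instead of the removed "depends on NET", the thermal patch above builds the generic netlink pieces only under CONFIG_NET and supplies empty inline stubs otherwise, so thermal_init()/thermal_exit() can call them unconditionally. A small sketch of that stub pattern, using HAVE_NETLINK as an illustrative stand-in for the config symbol:

#include <stdio.h>

/* Feature-gated helpers: real bodies when the feature is built in,
 * empty inline stubs otherwise, so callers need no #ifdefs of their own. */
#ifdef HAVE_NETLINK
static int genetlink_init(void)
{
	printf("registering netlink family\n");
	return 0;
}
static void genetlink_exit(void)
{
	printf("unregistering netlink family\n");
}
#else
static inline int genetlink_init(void) { return 0; }
static inline void genetlink_exit(void) { }
#endif

int main(void)
{
	int ret = genetlink_init();	/* always safe to call */

	genetlink_exit();
	return ret;
}
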
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index beb1afa2..7b951ad 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
s->rts = 0;
sprintf(b, "max3100-%d", s->minor);
- s->workqueue = create_freezeable_workqueue(b);
+ s->workqueue = create_freezable_workqueue(b);
if (!s->workqueue) {
dev_warn(&s->spi->dev, "cannot create workqueue\n");
return -EBUSY;
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 910870e..750b4f6 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
struct max3107_port *s = container_of(port, struct max3107_port, port);
/* Initialize work queue */
- s->workqueue = create_freezeable_workqueue("max3107");
+ s->workqueue = create_freezable_workqueue("max3107");
if (!s->workqueue) {
dev_err(&s->spi->dev, "Workqueue creation failed\n");
return -EBUSY;
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 93760b2..1ef4df9 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+ PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b992a88..ce22f4a 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -335,7 +335,7 @@ void usb_hcd_pci_shutdown(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown);
-#ifdef CONFIG_PM_OPS
+#ifdef CONFIG_PM
#ifdef CONFIG_PPC_PMAC
static void powermac_set_asic(struct pci_dev *pci_dev, int enable)
@@ -598,4 +598,4 @@ const struct dev_pm_ops usb_hcd_pci_pm_ops = {
};
EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops);
-#endif /* CONFIG_PM_OPS */
+#endif /* CONFIG_PM */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0968157..564eaa5 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1508,6 +1508,7 @@ void usb_set_device_state(struct usb_device *udev,
enum usb_device_state new_state)
{
unsigned long flags;
+ int wakeup = -1;
spin_lock_irqsave(&device_state_lock, flags);
if (udev->state == USB_STATE_NOTATTACHED)
@@ -1522,11 +1523,10 @@ void usb_set_device_state(struct usb_device *udev,
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
- device_set_wakeup_capable(&udev->dev,
- (udev->actconfig->desc.bmAttributes
- & USB_CONFIG_ATT_WAKEUP));
+ wakeup = udev->actconfig->desc.bmAttributes
+ & USB_CONFIG_ATT_WAKEUP;
else
- device_set_wakeup_capable(&udev->dev, 0);
+ wakeup = 0;
}
if (udev->state == USB_STATE_SUSPENDED &&
new_state != USB_STATE_SUSPENDED)
@@ -1538,6 +1538,8 @@ void usb_set_device_state(struct usb_device *udev,
} else
recursively_mark_NOTATTACHED(udev);
spin_unlock_irqrestore(&device_state_lock, flags);
+ if (wakeup >= 0)
+ device_set_wakeup_capable(&udev->dev, wakeup);
}
EXPORT_SYMBOL_GPL(usb_set_device_state);
@@ -2732,17 +2734,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
mutex_lock(&usb_address0_mutex);
- if (!udev->config && oldspeed == USB_SPEED_SUPER) {
- /* Don't reset USB 3.0 devices during an initial setup */
- usb_set_device_state(udev, USB_STATE_DEFAULT);
- } else {
- /* Reset the device; full speed may morph to high speed */
- /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
- retval = hub_port_reset(hub, port1, udev, delay);
- if (retval < 0) /* error or disconnect */
- goto fail;
- /* success, speed is known */
- }
+ /* Reset the device; full speed may morph to high speed */
+ /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+ retval = hub_port_reset(hub, port1, udev, delay);
+ if (retval < 0) /* error or disconnect */
+ goto fail;
+ /* success, speed is known */
+
retval = -ENODEV;
if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
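
The usb_set_device_state() hunk above defers device_set_wakeup_capable() until after device_state_lock is dropped, recording the decision in a local "wakeup" variable while the lock is held. A minimal pthread sketch of that record-then-act-after-unlock shape (all names here are illustrative stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int configured;			/* protected by state_lock */

/* Stand-in for device_set_wakeup_capable(); treated as something that
 * must not run while state_lock is held. */
static void set_wakeup_capable(int enable)
{
	printf("wakeup capable: %d\n", enable);
}

static void update_state(int new_configured)
{
	int wakeup = -1;		/* -1: leave the wakeup setting alone */

	pthread_mutex_lock(&state_lock);
	if (new_configured != configured) {
		configured = new_configured;
		wakeup = new_configured;	/* record the decision only */
	}
	pthread_mutex_unlock(&state_lock);

	if (wakeup >= 0)		/* act on it after dropping the lock */
		set_wakeup_capable(wakeup);
}

int main(void)
{
	update_state(1);
	return 0;
}
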
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 44c5954..81ce6a8 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Samsung Android phone modem - ID conflict with SPH-I500 */
+ { USB_DEVICE(0x04e8, 0x6601), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Roland SC-8820 */
{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Keytouch QWERTY Panel keyboard */
+ { USB_DEVICE(0x0926, 0x3333), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
{ USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index e8f4f36..a6f21b8 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -29,6 +29,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
/**
* ehci_xilinx_of_setup - Initialize the device for ehci_reset()
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4ab..0231814 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
}
}
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
{
- void *addr;
+ struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
+ void __iomem *addr;
u32 temp;
u64 temp_64;
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
}
}
-void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
-void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
+static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int last_ep)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 71fd8bd..a003e79 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
/***************** Streams structures manipulation *************************/
-void xhci_free_stream_ctx(struct xhci_hcd *xhci,
+static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
-struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
+static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
@@ -1973,11 +1973,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val &= DBOFF_MASK;
xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
" from cap regs base addr\n", val);
- xhci->dba = (void *) xhci->cap_regs + val;
+ xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
- xhci->ir_set = (void *) xhci->run_regs->ir_set;
+ xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
@@ -2034,7 +2034,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 032af7e..cfc1ad9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -472,8 +472,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
+
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -484,8 +487,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
- if (!state->new_deq_seg)
- BUG();
+ if (!state->new_deq_seg) {
+ WARN_ON(1);
+ return;
+ }
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2467,12 +2472,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg)) {
+ while (running_total < sg_dma_len(sg) && running_total < temp) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
@@ -2498,11 +2504,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
- dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
+ dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
@@ -2637,8 +2643,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
@@ -2676,7 +2681,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
+ (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2720,7 +2725,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
@@ -2760,7 +2765,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
@@ -2804,8 +2810,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
- if (urb->transfer_buffer_length < trb_buff_len)
+ (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
+ if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
@@ -2983,8 +2989,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
- running_total = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
+ running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
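
The xhci-ring changes above replace the "(1 << TRB_MAX_BUFF_SHIFT) - 1" masks with "TRB_MAX_BUFF_SIZE - 1" and add a second mask so that a DMA address already sitting on a 64KB boundary contributes 0 bytes to the first TRB instead of a full 64KB. A small standalone sketch of that arithmetic (the constant is defined locally here to mirror the 64KB limit the comments describe):

#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE	(1U << 16)	/* 64KB TRB segment limit */

/* Bytes that fit between addr and the next 64KB boundary;
 * the second mask turns 65536 into 0 for already-aligned addresses. */
static uint32_t bytes_to_boundary(uint64_t addr)
{
	uint32_t run = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));

	run &= TRB_MAX_BUFF_SIZE - 1;
	return run;
}

int main(void)
{
	printf("%u\n", bytes_to_boundary(0x10000));	/* aligned: 0    */
	printf("%u\n", bytes_to_boundary(0x1fff0));	/* 16 bytes left */
	return 0;
}
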
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2c11411..9a3645f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -112,7 +112,7 @@ int xhci_halt(struct xhci_hcd *xhci)
/*
* Set the run bit and wait for the host to be running.
*/
-int xhci_start(struct xhci_hcd *xhci)
+static int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
@@ -332,7 +332,7 @@ int xhci_init(struct usb_hcd *hcd)
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void xhci_event_ring_work(unsigned long arg)
+static void xhci_event_ring_work(unsigned long arg)
{
unsigned long flags;
int temp;
@@ -490,7 +490,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
xhci_writel(xhci, ER_IRQ_ENABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
if (xhci->quirks & XHCI_NEC_HOST)
xhci_queue_vendor_command(xhci, 0, 0, 0,
@@ -556,7 +556,7 @@ void xhci_stop(struct usb_hcd *hcd)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -791,7 +791,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
- xhci_print_ir_set(xhci, xhci->ir_set, 0);
+ xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -908,7 +908,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
/* Returns 1 if the arguments are OK;
* returns 0 this is a root hub; returns -EINVAL for NULL pointers.
*/
-int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
+static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
const char *func) {
struct xhci_hcd *xhci;
@@ -1744,7 +1744,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
-void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
+static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7aca6b1..711de25 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1369,7 +1369,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
}
/* xHCI debugging */
-void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
+void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
void xhci_dbg_regs(struct xhci_hcd *xhci);
void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 36376d2..a914010 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
INIT_LIST_HEAD(&musb->out_bulk);
hcd->uses_new_polling = 1;
+ hcd->has_tt = 1;
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 5cb50f8..4f0dd2e 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -484,6 +484,15 @@ struct musb {
unsigned set_address:1;
unsigned test_mode:1;
unsigned softconnect:1;
+
+ u8 address;
+ u8 test_mode_nr;
+ u16 ackpend; /* ep0 */
+ enum musb_g_ep0_state ep0_state;
+ struct usb_gadget g; /* the gadget */
+ struct usb_gadget_driver *gadget_driver; /* its driver */
+#endif
+
/*
* FIXME: Remove this flag.
*
@@ -497,14 +506,6 @@ struct musb {
*/
unsigned double_buffer_not_ok:1 __deprecated;
- u8 address;
- u8 test_mode_nr;
- u16 ackpend; /* ep0 */
- enum musb_g_ep0_state ep0_state;
- struct usb_gadget g; /* the gadget */
- struct usb_gadget_driver *gadget_driver; /* its driver */
-#endif
-
struct musb_hdrc_config *config;
#ifdef MUSB_CONFIG_PROC_FS
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index f5d4f36..25cb8b0 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -396,6 +396,7 @@ static void omap2430_musb_disable(struct musb *musb)
static int omap2430_musb_exit(struct musb *musb)
{
+ del_timer_sync(&musb_idle_timer);
omap2430_low_level_exit(musb);
otg_put_transceiver(musb->xceiv);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 2436796..5b88b68 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
{ USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
{ }
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 817e6ff..eb95aec 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -296,12 +296,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
__func__, status, endpoint);
} else {
tty = tty_port_tty_get(&port->port);
- if (urb->actual_length) {
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
- } else
- dbg("%s: empty read urb received", __func__);
- tty_kref_put(tty);
+ if (tty) {
+ if (urb->actual_length) {
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+ } else
+ dbg("%s: empty read urb received", __func__);
+ tty_kref_put(tty);
+ }
/* Resubmit urb so we continue receiving */
if (status != -ESHUTDOWN) {
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 15a5d89..1c11959 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -27,6 +27,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <linux/usb/cdc.h>
#include "visor.h"
/*
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
dbg("%s", __func__);
+ /*
+ * some Samsung Android phones in modem mode have the same ID
+ * as SPH-I500, but they are ACM devices, so don't bind to them
+ */
+ if (id->idVendor == SAMSUNG_VENDOR_ID &&
+ id->idProduct == SAMSUNG_SPH_I500_ID &&
+ serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
+ serial->dev->descriptor.bDeviceSubClass ==
+ USB_CDC_SUBCLASS_ACM)
+ return -ENODEV;
+
if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
serial->dev->actconfig->desc.bConfigurationValue);
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 8010aae..dd0e84a 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
lcd->spi = spi;
lcd->power = FB_BLANK_POWERDOWN;
lcd->buffer = kzalloc(8, GFP_KERNEL);
+ if (!lcd->buffer) {
+ ret = -ENOMEM;
+ goto out_free_lcd;
+ }
ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto out_free_lcd;
+ goto out_free_buffer;
}
lcd->ld = ld;
@@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(ld);
+out_free_buffer:
+ kfree(lcd->buffer);
out_free_lcd:
kfree(lcd);
return ret;
@@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->ld);
+ kfree(lcd->buffer);
kfree(lcd);
return 0;
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index eca855a..3de4ba0 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -646,7 +646,7 @@ static int __devexit cpwd_remove(struct platform_device *op)
struct cpwd *p = dev_get_drvdata(&op->dev);
int i;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < WD_NUMDEVS; i++) {
misc_deregister(&p->devs[i].misc);
if (!p->enabled) {
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 24b966d..204a560 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -710,7 +710,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
return 0;
}
-static void __devexit hpwdt_exit_nmi_decoding(void)
+static void hpwdt_exit_nmi_decoding(void)
{
unregister_die_notifier(&die_notifier);
if (cru_rom_addr)
@@ -726,7 +726,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
return 0;
}
-static void __devexit hpwdt_exit_nmi_decoding(void)
+static void hpwdt_exit_nmi_decoding(void)
{
}
#endif /* CONFIG_HPWDT_NMI_DECODING */
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index c7d67e9..7990625 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -201,11 +201,14 @@ static struct miscdevice fitpc2_wdt_miscdev = {
static int __init fitpc2_wdt_init(void)
{
int err;
+ const char *brd_name;
- if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2"))
+ brd_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (!brd_name || !strstr(brd_name, "SBC-FITPC2"))
return -ENODEV;
- pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME));
+ pr_info("%s found\n", brd_name);
if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) {
pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 0461858..b61ab1c 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -508,7 +508,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
sch311x_sio_outb(sio_config_port, 0x07, 0x0a);
/* Check if Logical Device Register is currently active */
- if (sch311x_sio_inb(sio_config_port, 0x30) && 0x01 == 0)
+ if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0)
printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n");
/* Get the base address of the runtime registers */
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index a6c12de..df2a64d 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -109,7 +109,7 @@ static int w83697ug_select_wd_register(void)
outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */
outb_p(0x30, WDT_EFER); /* select CR30 */
c = inb_p(WDT_EFDR);
- outb_p(c || 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
+ outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
return 0;
}
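
Both watchdog fixes above are operator mix-ups: "c || 0x01" collapses to 0 or 1 instead of setting bit 0, and "x && 0x01 == 0" parses as "x && (0x01 == 0)", which is always false, so the intended bit test never fires. A tiny standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned char c = 0x40;		/* register value read back */

	/* Logical OR collapses to 0 or 1: would write 0x01, not 0x41. */
	printf("c || 0x01 = 0x%02x\n", c || 0x01);	/* 0x01 */
	printf("c |  0x01 = 0x%02x\n", c | 0x01);	/* 0x41 */

	/* Precedence: "c && 0x01 == 0" is "c && (0x01 == 0)", always 0. */
	printf("%d\n", c && 0x01 == 0);			/* 0 */
	printf("%d\n", (c & 0x01) == 0);		/* 1 */
	return 0;
}
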
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 43f9f02..718050a 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -232,7 +232,7 @@ static int increase_reservation(unsigned long nr_pages)
set_phys_to_machine(pfn, frame_list[i]);
/* Link back into the page tables if not highmem. */
- if (pfn < max_low_pfn) {
+ if (!xen_hvm_domain() && pfn < max_low_pfn) {
int ret;
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
@@ -280,7 +280,7 @@ static int decrease_reservation(unsigned long nr_pages)
scrub_page(page);
- if (!PageHighMem(page)) {
+ if (!xen_hvm_domain() && !PageHighMem(page)) {
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma(0), 0);
@@ -296,7 +296,7 @@ static int decrease_reservation(unsigned long nr_pages)
/* No more mappings: invalidate P2M and add to balloon. */
for (i = 0; i < nr_pages; i++) {
pfn = mfn_to_pfn(frame_list[i]);
- set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
balloon_append(pfn_to_page(pfn));
}
@@ -392,15 +392,19 @@ static struct notifier_block xenstore_notifier;
static int __init balloon_init(void)
{
- unsigned long pfn, extra_pfn_end;
+ unsigned long pfn, nr_pages, extra_pfn_end;
struct page *page;
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
pr_info("xen_balloon: Initialising balloon driver.\n");
- balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
+ if (xen_pv_domain())
+ nr_pages = xen_start_info->nr_pages;
+ else
+ nr_pages = max_pfn;
+ balloon_stats.current_pages = min(nr_pages, max_pfn);
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 7468147..0ad1699 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -114,7 +114,7 @@ struct cpu_evtchn_s {
static __initdata struct cpu_evtchn_s init_evtchn_mask = {
.bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
};
-static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask;
+static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
BUG_ON(irq == -1);
#ifdef CONFIG_SMP
- cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
+ cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif
clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void)
/* By default all event channels notify CPU#0. */
for_each_irq_desc(i, desc) {
- cpumask_copy(desc->affinity, cpumask_of(0));
+ cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
}
#endif
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port)
put_cpu();
}
-static int get_nr_hw_irqs(void)
+static int xen_allocate_irq_dynamic(void)
{
- int ret = 1;
+ int first = 0;
+ int irq;
#ifdef CONFIG_X86_IO_APIC
- ret = get_nr_irqs_gsi();
+ /*
+ * For an HVM guest or domain 0, which sees "real" (emulated or
+ * actual, respectively) GSIs, we allocate dynamic IRQs
+ * e.g. those corresponding to event channels or MSIs
+ * etc. from the range above those "real" GSIs to avoid
+ * collisions.
+ */
+ if (xen_initial_domain() || xen_hvm_domain())
+ first = get_nr_irqs_gsi();
#endif
- return ret;
-}
+retry:
+ irq = irq_alloc_desc_from(first, -1);
-static int find_unbound_pirq(int type)
-{
- int rc, i;
- struct physdev_get_free_pirq op_get_free_pirq;
- op_get_free_pirq.type = type;
+ if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
+ printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
+ first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
+ goto retry;
+ }
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
- if (!rc)
- return op_get_free_pirq.pirq;
+ if (irq < 0)
+ panic("No available IRQ to bind to: increase nr_irqs!\n");
- for (i = 0; i < nr_irqs; i++) {
- if (pirq_to_irq[i] < 0)
- return i;
- }
- return -1;
+ return irq;
}
-static int find_unbound_irq(void)
+static int xen_allocate_irq_gsi(unsigned gsi)
{
- struct irq_data *data;
- int irq, res;
- int bottom = get_nr_hw_irqs();
- int top = nr_irqs-1;
-
- if (bottom == nr_irqs)
- goto no_irqs;
+ int irq;
- /* This loop starts from the top of IRQ space and goes down.
- * We need this b/c if we have a PCI device in a Xen PV guest
- * we do not have an IO-APIC (though the backend might have them)
- * mapped in. To not have a collision of physical IRQs with the Xen
- * event channels start at the top of the IRQ space for virtual IRQs.
+ /*
+ * A PV guest has no concept of a GSI (since it has no ACPI
+ * nor access to/knowledge of the physical APICs). Therefore
+ * all IRQs are dynamically allocated from the entire IRQ
+ * space.
*/
- for (irq = top; irq > bottom; irq--) {
- data = irq_get_irq_data(irq);
- /* only 15->0 have init'd desc; handle irq > 16 */
- if (!data)
- break;
- if (data->chip == &no_irq_chip)
- break;
- if (data->chip != &xen_dynamic_chip)
- continue;
- if (irq_info[irq].type == IRQT_UNBOUND)
- return irq;
- }
-
- if (irq == bottom)
- goto no_irqs;
+ if (xen_pv_domain() && !xen_initial_domain())
+ return xen_allocate_irq_dynamic();
- res = irq_alloc_desc_at(irq, -1);
+ /* Legacy IRQ descriptors are already allocated by the arch. */
+ if (gsi < NR_IRQS_LEGACY)
+ return gsi;
- if (WARN_ON(res != irq))
- return -1;
+ irq = irq_alloc_desc_at(gsi, -1);
+ if (irq < 0)
+ panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
return irq;
-
-no_irqs:
- panic("No available IRQ to bind to: increase nr_irqs!\n");
}
-static bool identity_mapped_irq(unsigned irq)
+static void xen_free_irq(unsigned irq)
{
- /* identity map all the hardware irqs */
- return irq < get_nr_hw_irqs();
+ /* Legacy IRQ descriptors are managed by the arch. */
+ if (irq < NR_IRQS_LEGACY)
+ return;
+
+ irq_free_desc(irq);
}
static void pirq_unmask_notify(int irq)
@@ -486,7 +474,7 @@ static bool probing_irq(int irq)
return desc && desc->action == NULL;
}
-static unsigned int startup_pirq(unsigned int irq)
+static unsigned int __startup_pirq(unsigned int irq)
{
struct evtchn_bind_pirq bind_pirq;
struct irq_info *info = info_for_irq(irq);
@@ -524,9 +512,15 @@ out:
return 0;
}
-static void shutdown_pirq(unsigned int irq)
+static unsigned int startup_pirq(struct irq_data *data)
+{
+ return __startup_pirq(data->irq);
+}
+
+static void shutdown_pirq(struct irq_data *data)
{
struct evtchn_close close;
+ unsigned int irq = data->irq;
struct irq_info *info = info_for_irq(irq);
int evtchn = evtchn_from_irq(irq);
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq)
info->evtchn = 0;
}
-static void enable_pirq(unsigned int irq)
+static void enable_pirq(struct irq_data *data)
{
- startup_pirq(irq);
+ startup_pirq(data);
}
-static void disable_pirq(unsigned int irq)
+static void disable_pirq(struct irq_data *data)
{
}
-static void ack_pirq(unsigned int irq)
+static void ack_pirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
- move_native_irq(irq);
+ move_native_irq(data->irq);
if (VALID_EVTCHN(evtchn)) {
mask_evtchn(evtchn);
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq)
}
}
-static void end_pirq(unsigned int irq)
-{
- int evtchn = evtchn_from_irq(irq);
- struct irq_desc *desc = irq_to_desc(irq);
-
- if (WARN_ON(!desc))
- return;
-
- if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
- (IRQ_DISABLED|IRQ_PENDING)) {
- shutdown_pirq(irq);
- } else if (VALID_EVTCHN(evtchn)) {
- unmask_evtchn(evtchn);
- pirq_unmask_notify(irq);
- }
-}
-
static int find_irq_by_gsi(unsigned gsi)
{
int irq;
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
goto out; /* XXX need refcount? */
}
- /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
- * we are using the !xen_initial_domain() to drop in the function.*/
- if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
- xen_pv_domain())) {
- irq = gsi;
- irq_alloc_desc_at(irq, -1);
- } else
- irq = find_unbound_irq();
+ irq = xen_allocate_irq_gsi(gsi);
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
handle_level_irq, name);
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
* this in the priv domain. */
if (xen_initial_domain() &&
HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
- irq_free_desc(irq);
+ xen_free_irq(irq);
irq = -ENOSPC;
goto out;
}
@@ -674,87 +644,46 @@ out:
}
#ifdef CONFIG_PCI_MSI
-#include <linux/msi.h>
-#include "../pci/msi.h"
-
-void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
+int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
- spin_lock(&irq_mapping_update_lock);
-
- if (alloc & XEN_ALLOC_IRQ) {
- *irq = find_unbound_irq();
- if (*irq == -1)
- goto out;
- }
-
- if (alloc & XEN_ALLOC_PIRQ) {
- *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
- if (*pirq == -1)
- goto out;
- }
+ int rc;
+ struct physdev_get_free_pirq op_get_free_pirq;
- set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
- handle_level_irq, name);
+ op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
- irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0);
- pirq_to_irq[*pirq] = *irq;
+ WARN_ONCE(rc == -ENOSYS,
+ "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
-out:
- spin_unlock(&irq_mapping_update_lock);
+ return rc ? -1 : op_get_free_pirq.pirq;
}
-int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
+int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+ int pirq, int vector, const char *name)
{
- int irq = -1;
- struct physdev_map_pirq map_irq;
- int rc;
- int pos;
- u32 table_offset, bir;
-
- memset(&map_irq, 0, sizeof(map_irq));
- map_irq.domid = DOMID_SELF;
- map_irq.type = MAP_PIRQ_TYPE_MSI;
- map_irq.index = -1;
- map_irq.pirq = -1;
- map_irq.bus = dev->bus->number;
- map_irq.devfn = dev->devfn;
-
- if (type == PCI_CAP_ID_MSIX) {
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
-
- pci_read_config_dword(dev, msix_table_offset_reg(pos),
- &table_offset);
- bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
-
- map_irq.table_base = pci_resource_start(dev, bir);
- map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
- }
+ int irq, ret;
spin_lock(&irq_mapping_update_lock);
- irq = find_unbound_irq();
-
+ irq = xen_allocate_irq_dynamic();
if (irq == -1)
goto out;
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
- if (rc) {
- printk(KERN_WARNING "xen map irq failed %d\n", rc);
-
- irq_free_desc(irq);
-
- irq = -1;
- goto out;
- }
- irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index);
-
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
- handle_level_irq,
- (type == PCI_CAP_ID_MSIX) ? "msi-x":"msi");
+ handle_level_irq, name);
+ irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
+ pirq_to_irq[pirq] = irq;
+ ret = irq_set_msi_desc(irq, msidesc);
+ if (ret < 0)
+ goto error_irq;
out:
spin_unlock(&irq_mapping_update_lock);
return irq;
+error_irq:
+ spin_unlock(&irq_mapping_update_lock);
+ xen_free_irq(irq);
+ return -1;
}
#endif
@@ -779,11 +708,12 @@ int xen_destroy_irq(int irq)
printk(KERN_WARNING "unmap irq failed %d\n", rc);
goto out;
}
- pirq_to_irq[info->u.pirq.pirq] = -1;
}
+ pirq_to_irq[info->u.pirq.pirq] = -1;
+
irq_info[irq] = mk_unbound_info();
- irq_free_desc(irq);
+ xen_free_irq(irq);
out:
spin_unlock(&irq_mapping_update_lock);
@@ -814,7 +744,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = evtchn_to_irq[evtchn];
if (irq == -1) {
- irq = find_unbound_irq();
+ irq = xen_allocate_irq_dynamic();
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_fasteoi_irq, "event");
@@ -839,7 +769,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
irq = per_cpu(ipi_to_irq, cpu)[ipi];
if (irq == -1) {
- irq = find_unbound_irq();
+ irq = xen_allocate_irq_dynamic();
if (irq < 0)
goto out;
@@ -875,7 +805,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
irq = per_cpu(virq_to_irq, cpu)[virq];
if (irq == -1) {
- irq = find_unbound_irq();
+ irq = xen_allocate_irq_dynamic();
set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
handle_percpu_irq, "virq");
@@ -934,7 +864,7 @@ static void unbind_from_irq(unsigned int irq)
if (irq_info[irq].type != IRQT_UNBOUND) {
irq_info[irq] = mk_unbound_info();
- irq_free_desc(irq);
+ xen_free_irq(irq);
}
spin_unlock(&irq_mapping_update_lock);
@@ -990,7 +920,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
if (irq < 0)
return irq;
- irqflags |= IRQF_NO_SUSPEND;
+ irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -1234,11 +1164,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
return 0;
}
-static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+ bool force)
{
unsigned tcpu = cpumask_first(dest);
- return rebind_irq_to_cpu(irq, tcpu);
+ return rebind_irq_to_cpu(data->irq, tcpu);
}
int resend_irq_on_evtchn(unsigned int irq)
@@ -1257,35 +1188,35 @@ int resend_irq_on_evtchn(unsigned int irq)
return 1;
}
-static void enable_dynirq(unsigned int irq)
+static void enable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
}
-static void disable_dynirq(unsigned int irq)
+static void disable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
if (VALID_EVTCHN(evtchn))
mask_evtchn(evtchn);
}
-static void ack_dynirq(unsigned int irq)
+static void ack_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
- move_masked_irq(irq);
+ move_masked_irq(data->irq);
if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
}
-static int retrigger_dynirq(unsigned int irq)
+static int retrigger_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(irq);
+ int evtchn = evtchn_from_irq(data->irq);
struct shared_info *sh = HYPERVISOR_shared_info;
int ret = 0;
@@ -1334,7 +1265,7 @@ static void restore_cpu_pirqs(void)
printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
- startup_pirq(irq);
+ __startup_pirq(irq);
}
}
@@ -1445,7 +1376,6 @@ void xen_poll_irq(int irq)
void xen_irq_resume(void)
{
unsigned int cpu, irq, evtchn;
- struct irq_desc *desc;
init_evtchn_cpu_bindings();
@@ -1465,66 +1395,48 @@ void xen_irq_resume(void)
restore_cpu_ipis(cpu);
}
- /*
- * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
- * are not handled by the IRQ core.
- */
- for_each_irq_desc(irq, desc) {
- if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
- continue;
- if (desc->status & IRQ_DISABLED)
- continue;
-
- evtchn = evtchn_from_irq(irq);
- if (evtchn == -1)
- continue;
-
- unmask_evtchn(evtchn);
- }
-
restore_cpu_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
- .name = "xen-dyn",
+ .name = "xen-dyn",
- .disable = disable_dynirq,
- .mask = disable_dynirq,
- .unmask = enable_dynirq,
+ .irq_disable = disable_dynirq,
+ .irq_mask = disable_dynirq,
+ .irq_unmask = enable_dynirq,
- .eoi = ack_dynirq,
- .set_affinity = set_affinity_irq,
- .retrigger = retrigger_dynirq,
+ .irq_eoi = ack_dynirq,
+ .irq_set_affinity = set_affinity_irq,
+ .irq_retrigger = retrigger_dynirq,
};
static struct irq_chip xen_pirq_chip __read_mostly = {
- .name = "xen-pirq",
+ .name = "xen-pirq",
- .startup = startup_pirq,
- .shutdown = shutdown_pirq,
+ .irq_startup = startup_pirq,
+ .irq_shutdown = shutdown_pirq,
- .enable = enable_pirq,
- .unmask = enable_pirq,
+ .irq_enable = enable_pirq,
+ .irq_unmask = enable_pirq,
- .disable = disable_pirq,
- .mask = disable_pirq,
+ .irq_disable = disable_pirq,
+ .irq_mask = disable_pirq,
- .ack = ack_pirq,
- .end = end_pirq,
+ .irq_ack = ack_pirq,
- .set_affinity = set_affinity_irq,
+ .irq_set_affinity = set_affinity_irq,
- .retrigger = retrigger_dynirq,
+ .irq_retrigger = retrigger_dynirq,
};
static struct irq_chip xen_percpu_chip __read_mostly = {
- .name = "xen-percpu",
+ .name = "xen-percpu",
- .disable = disable_dynirq,
- .mask = disable_dynirq,
- .unmask = enable_dynirq,
+ .irq_disable = disable_dynirq,
+ .irq_mask = disable_dynirq,
+ .irq_unmask = enable_dynirq,
- .ack = ack_dynirq,
+ .irq_ack = ack_dynirq,
};
int xen_set_callback_via(uint64_t via)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index db8c4c4..ebb2928 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -34,32 +34,38 @@ enum shutdown_state {
/* Ignore multiple shutdown requests. */
static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
-#ifdef CONFIG_PM_SLEEP
-static int xen_hvm_suspend(void *data)
-{
- struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
- int *cancelled = data;
-
- BUG_ON(!irqs_disabled());
-
- *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
+struct suspend_info {
+ int cancelled;
+ unsigned long arg; /* extra hypercall argument */
+ void (*pre)(void);
+ void (*post)(int cancelled);
+};
- xen_hvm_post_suspend(*cancelled);
+static void xen_hvm_post_suspend(int cancelled)
+{
+ xen_arch_hvm_post_suspend(cancelled);
gnttab_resume();
+}
- if (!*cancelled) {
- xen_irq_resume();
- xen_console_resume();
- xen_timer_resume();
- }
+static void xen_pre_suspend(void)
+{
+ xen_mm_pin_all();
+ gnttab_suspend();
+ xen_arch_pre_suspend();
+}
- return 0;
+static void xen_post_suspend(int cancelled)
+{
+ xen_arch_post_suspend(cancelled);
+ gnttab_resume();
+ xen_mm_unpin_all();
}
+#ifdef CONFIG_PM_SLEEP
static int xen_suspend(void *data)
{
+ struct suspend_info *si = data;
int err;
- int *cancelled = data;
BUG_ON(!irqs_disabled());
@@ -70,22 +76,20 @@ static int xen_suspend(void *data)
return err;
}
- xen_mm_pin_all();
- gnttab_suspend();
- xen_pre_suspend();
+ if (si->pre)
+ si->pre();
/*
* This hypercall returns 1 if suspend was cancelled
* or the domain was merely checkpointed, and 0 if it
* is resuming in a new domain.
*/
- *cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+ si->cancelled = HYPERVISOR_suspend(si->arg);
- xen_post_suspend(*cancelled);
- gnttab_resume();
- xen_mm_unpin_all();
+ if (si->post)
+ si->post(si->cancelled);
- if (!*cancelled) {
+ if (!si->cancelled) {
xen_irq_resume();
xen_console_resume();
xen_timer_resume();
@@ -99,7 +103,7 @@ static int xen_suspend(void *data)
static void do_suspend(void)
{
int err;
- int cancelled = 1;
+ struct suspend_info si;
shutting_down = SHUTDOWN_SUSPEND;
@@ -129,20 +133,29 @@ static void do_suspend(void)
goto out_resume;
}
- if (xen_hvm_domain())
- err = stop_machine(xen_hvm_suspend, &cancelled, cpumask_of(0));
- else
- err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
+ si.cancelled = 1;
+
+ if (xen_hvm_domain()) {
+ si.arg = 0UL;
+ si.pre = NULL;
+ si.post = &xen_hvm_post_suspend;
+ } else {
+ si.arg = virt_to_mfn(xen_start_info);
+ si.pre = &xen_pre_suspend;
+ si.post = &xen_post_suspend;
+ }
+
+ err = stop_machine(xen_suspend, &si, cpumask_of(0));
dpm_resume_noirq(PMSG_RESUME);
if (err) {
printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
- cancelled = 1;
+ si.cancelled = 1;
}
out_resume:
- if (!cancelled) {
+ if (!si.cancelled) {
xen_arch_resume();
xs_resume();
} else
@@ -162,12 +175,39 @@ out:
}
#endif /* CONFIG_PM_SLEEP */
+struct shutdown_handler {
+ const char *command;
+ void (*cb)(void);
+};
+
+static void do_poweroff(void)
+{
+ shutting_down = SHUTDOWN_POWEROFF;
+ orderly_poweroff(false);
+}
+
+static void do_reboot(void)
+{
+ shutting_down = SHUTDOWN_POWEROFF; /* ? */
+ ctrl_alt_del();
+}
+
static void shutdown_handler(struct xenbus_watch *watch,
const char **vec, unsigned int len)
{
char *str;
struct xenbus_transaction xbt;
int err;
+ static struct shutdown_handler handlers[] = {
+ { "poweroff", do_poweroff },
+ { "halt", do_poweroff },
+ { "reboot", do_reboot },
+#ifdef CONFIG_PM_SLEEP
+ { "suspend", do_suspend },
+#endif
+ {NULL, NULL},
+ };
+ static struct shutdown_handler *handler;
if (shutting_down != SHUTDOWN_INVALID)
return;
@@ -184,7 +224,14 @@ static void shutdown_handler(struct xenbus_watch *watch,
return;
}
- xenbus_write(xbt, "control", "shutdown", "");
+ for (handler = &handlers[0]; handler->command; handler++) {
+ if (strcmp(str, handler->command) == 0)
+ break;
+ }
+
+ /* Only acknowledge commands which we are prepared to handle. */
+ if (handler->cb)
+ xenbus_write(xbt, "control", "shutdown", "");
err = xenbus_transaction_end(xbt, 0);
if (err == -EAGAIN) {
@@ -192,17 +239,8 @@ static void shutdown_handler(struct xenbus_watch *watch,
goto again;
}
- if (strcmp(str, "poweroff") == 0 ||
- strcmp(str, "halt") == 0) {
- shutting_down = SHUTDOWN_POWEROFF;
- orderly_poweroff(false);
- } else if (strcmp(str, "reboot") == 0) {
- shutting_down = SHUTDOWN_POWEROFF; /* ? */
- ctrl_alt_del();
-#ifdef CONFIG_PM_SLEEP
- } else if (strcmp(str, "suspend") == 0) {
- do_suspend();
-#endif
+ if (handler->cb) {
+ handler->cb();
} else {
printk(KERN_INFO "Ignoring shutdown request: %s\n", str);
shutting_down = SHUTDOWN_INVALID;
@@ -281,27 +319,18 @@ static int shutdown_event(struct notifier_block *notifier,
return NOTIFY_DONE;
}
-static int __init __setup_shutdown_event(void)
-{
- /* Delay initialization in the PV on HVM case */
- if (xen_hvm_domain())
- return 0;
-
- if (!xen_pv_domain())
- return -ENODEV;
-
- return xen_setup_shutdown_event();
-}
-
int xen_setup_shutdown_event(void)
{
static struct notifier_block xenstore_notifier = {
.notifier_call = shutdown_event
};
+
+ if (!xen_domain())
+ return -ENODEV;
register_xenstore_notifier(&xenstore_notifier);
return 0;
}
EXPORT_SYMBOL_GPL(xen_setup_shutdown_event);
-subsys_initcall(__setup_shutdown_event);
+subsys_initcall(xen_setup_shutdown_event);
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index afbe041..319dd0a 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -156,9 +156,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
if (ret)
goto out;
xenbus_probe(NULL);
- ret = xen_setup_shutdown_event();
- if (ret)
- goto out;
return 0;
out: